[clblas] 03/61: enable offline compilation of a subset of GEMM and TRSM on targeted device

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Fri Jul 24 22:49:42 UTC 2015


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch master
in repository clblas.

commit 434b38ed6d366d972dae1e05d59119125c354556
Author: Timmy <timmy.liu at amd.com>
Date:   Thu Apr 16 09:31:29 2015 -0500

    enable offline compilation of a subset of GEMM and TRSM on targeted device
---
 .gitignore                                         |    3 +
 README.md                                          |   65 +-
 doc/README-BinaryCacheOnDisk.txt                   |   69 +
 doc/README-FunctorConcepts.txt                     |  100 +
 doc/README-HowToIntroduceFunctors.txt              |  402 ++
 doc/README-TransformASolverIntoAFunctor.txt        |  382 ++
 src/CMakeLists.txt                                 |   63 +
 src/clBLAS.def                                     |   28 +
 src/clBLAS.h                                       |  622 ++
 src/client/clfunc_common.hpp                       |    1 +
 src/client/clfunc_xgemm.hpp                        |   34 +-
 src/client/client.cpp                              |   19 +-
 src/flags_public.txt                               |    4 +
 src/include/binary_lookup.h                        |  273 +
 src/include/devinfo.h                              |    2 +
 src/include/md5sum.h                               |   50 +
 src/include/rwlock.h                               |  117 +
 src/library/CMakeLists.txt                         |  265 +-
 src/library/bingen.cmake                           |  141 +
 src/library/blas/fill.cc                           |  272 +
 src/library/blas/functor/bonaire.cc                |   93 +
 src/library/blas/functor/functor.cc                |  117 +
 src/library/blas/functor/functor_fill.cc           |  156 +
 src/library/blas/functor/functor_selector.cc       |  342 ++
 src/library/blas/functor/functor_xgemm.cc          |  323 +
 src/library/blas/functor/functor_xscal.cc          |  410 ++
 src/library/blas/functor/functor_xscal_generic.cc  |  439 ++
 src/library/blas/functor/functor_xtrsm.cc          |  336 ++
 src/library/blas/functor/gcn_dgemm.cc              | 1035 ++++
 src/library/blas/functor/gcn_dgemmCommon.cc        |  997 +++
 src/library/blas/functor/gcn_dgemmSmallMatrices.cc |  654 ++
 src/library/blas/functor/gcn_sgemm.cc              |  556 ++
 src/library/blas/functor/gcn_sgemmSmallMatrices.cc |  558 ++
 src/library/blas/functor/gpu_dtrsm.cc              |  823 +++
 src/library/blas/functor/hawaii.cc                 |  167 +
 .../blas/functor/hawaii_dgemmChannelConflict.cc    |  159 +
 .../blas/functor/hawaii_dgemmSplitKernel.cc        |  670 ++
 .../blas/functor/hawaii_sgemmBranchKernel.cc       |  392 ++
 .../blas/functor/hawaii_sgemmSplitKernel.cc        |  834 +++
 src/library/blas/functor/include/BinaryBuild.h     |   10 +
 src/library/blas/functor/include/atomic_counter.h  |  173 +
 src/library/blas/functor/include/bonaire.h         |   41 +
 src/library/blas/functor/include/functor.h         |  496 ++
 src/library/blas/functor/include/functor_fill.h    |   99 +
 .../functor/include/functor_hawaii_dgemm_NT_MN48.h |  210 +
 .../blas/functor/include/functor_selector.h        |  149 +
 src/library/blas/functor/include/functor_utils.h   |  116 +
 src/library/blas/functor/include/functor_xgemm.h   |  213 +
 src/library/blas/functor/include/functor_xscal.h   |  207 +
 .../blas/functor/include/functor_xscal_generic.h   |  173 +
 src/library/blas/functor/include/functor_xtrsm.h   |  203 +
 src/library/blas/functor/include/gcn_dgemm.h       |   59 +
 src/library/blas/functor/include/gcn_dgemmCommon.h |   22 +
 .../blas/functor/include/gcn_dgemmSmallMatrices.h  |   27 +
 src/library/blas/functor/include/gcn_sgemm.h       |   62 +
 .../blas/functor/include/gcn_sgemmSmallMatrices.h  |   27 +
 src/library/blas/functor/include/gpu_dtrsm.h       |   28 +
 src/library/blas/functor/include/hawaii.h          |   41 +
 .../functor/include/hawaii_dgemmChannelConflict.h  |   22 +
 .../blas/functor/include/hawaii_dgemmSplitKernel.h |   46 +
 .../functor/include/hawaii_sgemmBranchKernel.h     |   50 +
 .../blas/functor/include/hawaii_sgemmSplitKernel.h |   46 +
 src/library/blas/functor/include/tahiti.h          |   41 +
 src/library/blas/functor/tahiti.cc                 |  120 +
 src/library/blas/generic/binary_lookup.cc          |  685 +++
 src/library/blas/generic/common.c                  |   25 +-
 src/library/blas/generic/common2.cc                |   98 +
 src/library/blas/generic/functor_cache.cc          |   80 +
 src/library/blas/generic/solution_seq_make.c       |    4 +-
 src/library/blas/gens/blas_kgen.h                  |    3 -
 src/library/blas/gens/blas_subgroup.c              |    6 +-
 src/library/blas/gens/clTemplates/dgemm_NT_MN48.cl |  347 ++
 .../gens/clTemplates/dgemm_gcn_SmallMatrices.cl    | 1159 ++++
 src/library/blas/gens/clTemplates/dgemm_hawai.cl   | 6371 ++++++++++++++++++++
 .../clTemplates/dgemm_hawaiiChannelConfilct.cl     |  152 +
 .../gens/clTemplates/dgemm_hawaiiSplitKernel.cl    | 5043 ++++++++++++++++
 src/library/blas/gens/clTemplates/dtrsm_gpu.cl     | 2004 ++++++
 src/library/blas/gens/clTemplates/sgemm_gcn.cl     | 2083 +++++++
 .../gens/clTemplates/sgemm_gcn_SmallMatrices.cl    |  786 +++
 .../gens/clTemplates/sgemm_hawaiiSplitKernel.cl    | 6158 +++++++++++++++++++
 src/library/blas/include/clblas-internal.h         |   28 +
 src/library/blas/init.c                            |   12 +
 src/library/blas/matrix.c                          |  979 +++
 src/library/blas/xgemm.c                           |  783 ---
 src/library/blas/xgemm.cc                          |  328 +
 src/library/blas/xscal.cc                          |  340 ++
 src/library/blas/xtrsm.c                           |  249 -
 src/library/blas/xtrsm.cc                          |  333 +
 src/library/common/devinfo.c                       |    6 +
 src/library/common/md5sum.c                        |  378 ++
 src/library/common/rwlock.c                        |  172 +
 src/library/tools/bingen/CMakeLists.txt            |   33 +
 src/library/tools/bingen/bingen.cpp                |  512 ++
 src/library/tools/ktest/CMakeLists.txt             |   34 +-
 src/library/tools/tplgen/tplgen.cpp                |   85 +-
 src/library/tools/tune/CMakeLists.txt              |   33 +-
 src/library/tools/tune/tune.c                      |    5 +-
 src/samples/example_csscal.c                       |    3 +-
 src/tests/correctness/test-correctness.cpp         |    3 +-
 src/tests/performance/test-performance.cpp         |    5 +-
 100 files changed, 42785 insertions(+), 1194 deletions(-)

diff --git a/.gitignore b/.gitignore
index 7ae9f4d..59cf9c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,3 +17,6 @@
 
 # Generated kernel template files
 *.clT
+
+# flags.txt file
+*flags.txt
diff --git a/README.md b/README.md
index 728a3c0..9847dd6 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,24 @@ library does generate and enqueue optimized OpenCL kernels, relieving
 the user from the task of writing, optimizing and maintaining kernel
 code themselves.
 
+## clBLAS update notes 04/2015
+-   A subset of GEMM and TRSM can be compiled off-line for Hawaii, Bonaire and Tahiti devices at compile-time. This feature
+    eliminates the overhead of calling clBuildProgram() at run-time.
+-   Off-line compilation can be done with the OpenCL 1.1, OpenCL 1.2 and OpenCL 2.0 runtimes. However, for better
+    performance OpenCL 2.0 is recommended. Library users can set "OCL_VERSION" in CMake to match the OpenCL version
+    supported on their system. It is the library user's responsibility to ensure compatible hardware and drivers.
+-   Added a flags_public.txt file that contains the OpenCL compiler flags used for off-line compilation. flags_public.txt
+    is only loaded when OCL_VERSION is 2.0.
+-   Users can compile off-line for one or more supported devices by selecting
+    OCL_OFFLINE_BUILD_BONAIRE_KERNEL,
+    OCL_OFFLINE_BUILD_HAWAII_KERNEL or
+    OCL_OFFLINE_BUILD_TAHITI_KERNEL.
+    However, compiling for more than one device at a time might run out of heap memory, so compiling for
+    one device at a time is recommended.
+-   Users may also supply a specific OpenCL compiler path with OCL_COMPILER_DIR; otherwise the library loads the default OpenCL compiler.
+-   The minimum driver requirement for off-line compilation is 14.502.
+
+
 ## clBLAS library user documentation
 
 [Library and API documentation][] for developers is available online as
@@ -48,15 +66,12 @@ how to contribute code to this open source project. The code in the
 be made against the /develop branch.
 
 ## License
-
-The source for clBLAS is licensed under the [Apache License, Version
-2.0][]
+The source for clBLAS is licensed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
 
 ## Example
+The simple example below shows how to use clBLAS to compute an OpenCL accelerated SGEMM
 
-The simple example below shows how to use clBLAS to compute an OpenCL
-accelerated SGEMM
-
+```c
     #include <sys/types.h>
     #include <stdio.h>
 
@@ -171,42 +186,30 @@ accelerated SGEMM
 
         return ret;
     }
+```
 
 ## Build dependencies
-
 ### Library for Windows
-
--   Windows® 7/8
-
--   Visual Studio 2010 SP1, 2012
-
--   An OpenCL SDK, such as APP SDK 2.9
-
--   Latest CMake
+*  Windows® 7/8
+*  Visual Studio 2010 SP1, 2012
+*  An OpenCL SDK, such as APP SDK 2.8
+*  Latest CMake
 
 ### Library for Linux
-
--   GCC 4.6 and onwards
-
--   An OpenCL SDK, such as APP SDK 2.9
-
--   Latest CMake
+*  GCC 4.6 and onwards
+*  An OpenCL SDK, such as APP SDK 2.9
+*  Latest CMake
 
 ### Library for Mac OSX
-
--   Recommended to generate Unix makefiles with cmake
+*  Recommended to generate Unix makefiles with cmake
 
 ### Test infrastructure
-
--   Googletest v1.6
-
--   ACML on windows/linux; Accelerate on Mac OSX
-
--   Latest Boost
+*  Googletest v1.6
+*  ACML on windows/linux; Accelerate on Mac OSX
+*  Latest Boost
 
 ### Performance infrastructure
-
--   Python
+* Python
 
   [Library and API documentation]: http://clmathlibraries.github.io/clBLAS/
   [clmath at googlegroups.com]: https://groups.google.com/forum/#!forum/clmath
diff --git a/doc/README-BinaryCacheOnDisk.txt b/doc/README-BinaryCacheOnDisk.txt
new file mode 100644
index 0000000..5097a16
--- /dev/null
+++ b/doc/README-BinaryCacheOnDisk.txt
@@ -0,0 +1,69 @@
+S. Chauveau
+CAPS Entreprise
+clBLAS Project
+------------------------------
+April 30,2014
+
+
+The implementation of a binary cache for CL programs can be found in 
+files src/include/binary_lookup.h and src/library/blas/generic/binary_lookup.cc
+
+The cache is currently disabled by default. It can be enabled by
+setting the environment variable 'CLBLAS_CACHE_PATH' to the directory
+containing the cache entries.
+
+In the code itself, accesses to the cache are controlled by the
+BinaryLookup class. A typical cache query looks as follows:
+
+   (1) Create a local instance of BinaryLookup 
+   
+   (2) Specify the additional characteristics (i.e. variants) of the
+       requested program. That information combined with the program name
+       and the OpenCL context and device shall form a unique signature
+       for the binary program.
+   
+   (3) Perform the effective search by calling the 'found' method
+   
+   (4a) If the search was successful then the cl_program can be retrieved 
+       by a call to the 'getProgram' method
+   
+   (4b) If the search was not successful then a cl_program 
+       must be created, passed to the 'setProgram' method, 
+       and stored in the cache by a call to 'populateCache'.
+   
+   (5) Destroy the BinaryLookup local instance.
+
+
+So in practice a typical query looks as follows:
+
+  cl_program program  ;
+
+  // The program name is part of the signature and shall be unique 
+  const char * program_name = "... my unique program name ... " ;
+
+  BinaryLookup bl(context, device, program_name);
+
+  // Specify some additional information used to build a 
+  // unique signature for that cache entry
+               
+  bl.variantInt( vectorSize );
+  bl.variantInt( hasBorder );
+  ... 
+
+  // Perform the query 
+  if ( bl.found() ) 
+  {
+     // Success! use the cl_program retrieved from the cache
+     program = bl.getProgram();
+  }
+  else 
+  {
+     // Failure! we need to build the program 
+     program = build_my_program(context,device,vectorSize,...) ; 
+     // and inform the lookup object of the program
+     bl.setProgram(program);  
+     // and finally populate the cache
+     bl.populateCache();
+  }
+
+  // The BinaryLookup shall now be destroyed 
diff --git a/doc/README-FunctorConcepts.txt b/doc/README-FunctorConcepts.txt
new file mode 100644
index 0000000..e0520ff
--- /dev/null
+++ b/doc/README-FunctorConcepts.txt
@@ -0,0 +1,100 @@
+S. Chauveau
+CAPS Entreprise
+April 30, 2014
+
+The Functor concept was introduced in clBLAS to simplify the creation 
+of specialized versions for dedicated architectures.
+
+The original system, referred as the 'Solver' system in this document, 
+is very centralized and not flexible enough to insert customized kernels.
+
+The Functor 
+===========
+
+A functor is simply a C++ object that provides an implementation of 
+a function. In the current case, that function is one of the BLAS calls 
+implemented in OpenCL. 
+
+The base class of all functors is clblasFunctor 
+  - see src/library/blas/functor/include/functor.h
+  - see src/library/blas/functor/functor.cc
+ 
+That class does not provide much by itself but it is supposed to be derived
+once for each BLAS function to be implemented. 
+
+For instance the clblasSgemmFunctor class will be the base class of all
+functors providing a generic or specific implementation of SGEMM.
+
+A generic functor is one that is applicable to all possible arguments of the
+function it implements. In most cases, there will be at least one generic
+functor that will simply call the existing Solver-based implementation of the
+function. For SGEMM, that is the class clblasSgemmFunctorFallback.
+
+A specific functor is one that is applicable to only a subset of the possible
+arguments of the function it implements. For instance, an SGEMM functor could
+implement it only for matrices of a given block size, only for square
+matrices, or only for a specific device architecture (e.g. AMD Hawaii), etc.
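+
+As a purely illustrative sketch (the class below does not exist in the code;
+the exact Args layout and the execute() signature are defined in
+src/library/blas/functor/include/functor_xgemm.h), a specific functor is just
+another subclass whose execute() method only handles the cases it was written for:
+
+  // Hypothetical example of a specific SGEMM functor tuned for one device.
+  class MyHawaiiSgemmFunctor : public clblasSgemmFunctor
+  {
+  public:
+    virtual clblasStatus execute(Args & args)
+    {
+      // ... enqueue a kernel tuned for this device / argument subset ...
+      return clblasSuccess;
+    }
+  };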
+
+The Functor Selector 
+====================
+
+Multiple generic and specific functors may be available to implement each
+clBLAS call. The selection of the proper functor is delegated to the class
+clblasFunctorSelector whose default implementation typically returns the
+fallback functors.
+
+  - see src/library/blas/functor/include/functor_selector.h
+  - see src/library/blas/functor/functor_selector.cc
+
+So clblasFunctorSelector provides a large set of virtual selection methods.
+Typically, a method to select a specific functor will be provided for each
+supported BLAS function. Another method may be provided to select a generic 
+functor but that is not mandatory.
+
+In the default implementation of clblasFunctorSelector, the specific 
+selection methods typically redirect to the generic ones, which return the 
+fallback functors (so using the existing Solver-based implementation).  
+
+
+The class clblasFunctorSelector is supposed to be derived once for each
+supported architecture (e.g. Hawaii, Tahiti, ...) and a single global instance
+of each of those derived classes shall be created. This is important because
+those instances register themselves in a global data structure that is later
+used to find the proper clblasFunctorSelector according to the architecture
+(see clblasFunctorSelector::find() )
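+
+For illustration only (the class and member names below are hypothetical, and
+the exact registration mechanism is an implementation detail of
+functor_selector.cc), an architecture-specific selector therefore tends to
+look like this:
+
+  // Hypothetical example of a per-architecture selector.
+  class clblasMyDeviceFunctorSelector : public clblasFunctorSelector
+  {
+  public:
+    clblasMyDeviceFunctorSelector();  // registers this instance in the global table
+
+    virtual clblasSscalFunctor * select_sscal_specific(clblasSscalFunctor::Args & args)
+    {
+      // a real selector would inspect args and return a device-tuned functor;
+      // this sketch simply defers to the generic (fallback) selection
+      return this->select_sscal_generic();
+    }
+  };
+
+  // The single global instance whose constructor performs the registration.
+  static clblasMyDeviceFunctorSelector mydevice_selector;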
+
+
+Functor Management & Cache
+==========================
+
+Each functor contains a reference counter that, when it reaches zero, causes
+the functor destruction. See the members clblasFunctor::retain() and
+clblasFunctor::release().
+
+Of course, to be efficient, functors must be reusable between BLAS calls so 
+some mechanisms must be implemented to manage the functors. 
+
+Some functors, such as the fallback functors, are independent of the 
+arguments and of the OpenCL context & device. Those can typically be 
+implemented using a single global instance that will never be destroyed.
+
+Other functors, such as those that manage a cl_program internally, are 
+dependent on the OpenCL context & device and sometimes on some arguments. 
+They need to be stored in caches using some information as keys. 
+
+In the current implementation, we propose that each functor class shall 
+implement its own private cache. Such functors shall not be created directly 
+using their constructors but via a dedicated 'provide' function (the name 'provide'
+is not mandatory) that will take care of managing the internal cache.
+
+The template class clblasFunctorCache<F> is provided as a simple
+implementation of a cache of functors of type F. Use of that cache is not a
+mandatory part of the functor design. Other strategies could be to keep a
+single instance of the functor and implement a cache for the cl_program or to
+implement a global cache shared by multiple functor classes.
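+
+From the caller's point of view, the reference counting and the 'provide'
+functions combine into the pattern below (a condensed version of the code
+shown in doc/README-HowToIntroduceFunctors.txt; error handling is omitted, and
+'queue' and 'args' stand for the command queue and the Args built from the
+call arguments):
+
+  // Typical call site inside a clBLAS entry point.
+  clblasFunctorSelector * fselector = clblasFunctorSelector::find(queue);
+
+  clblasSscalFunctor * functor = fselector->select_sscal_specific(args);
+
+  clblasStatus res = functor->execute(args);  // run the BLAS operation
+
+  functor->release();                         // drop our reference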
+
+
+
+  
+
+
diff --git a/doc/README-HowToIntroduceFunctors.txt b/doc/README-HowToIntroduceFunctors.txt
new file mode 100644
index 0000000..58775f9
--- /dev/null
+++ b/doc/README-HowToIntroduceFunctors.txt
@@ -0,0 +1,402 @@
+S. Chauveau
+CAPS Entreprise
+clBLAS Project
+------------------------------
+April 30,2014
+
+This document describes the steps needed to introduce the Functor framework
+for a clBLAS function currently implemented using the previous Solver mechanism.
+
+The procedure is composed of the following steps:
+
+  (1) Declaration of new base functor classes for the
+      considered clBLAS function. 
+
+  (2) Create a new fallback class derived from the class created
+      in (1) and using the existing Solver implementation. 
+
+  (3) Add the appropriate members to the clblasFunctorSelector 
+      class
+
+  (4) Modify the clBLAS function to use the functor. 
+
+In the following, we will consider the case of the XSCAL functions. 
+
+Initial State
+=============
+
+The XSCAL functions are originally implemented in the file src/library/blas/xscal.c
+
+Most of the Solver-based implementation occurs within the static function
+doScal() that is shared by all SCAL functions. clblasSscal(), clblasDscal()
+... are basically a single call to doScal()
+
+  clblasStatus doScal(...) 
+  { 
+    ...  // Do all the magic  
+  } 
+
+  clblasStatus
+  clblasSscal( size_t N,
+               float alpha,
+               cl_mem X,
+               size_t offx,
+               int incx,
+               cl_uint numCommandQueues,
+               cl_command_queue *commandQueues,
+               cl_uint numEventsInWaitList,
+               const cl_event *eventWaitList,
+               cl_event *events
+               )
+  {
+      CLBlasKargs kargs;
+
+      #ifdef DEBUG_SCAL
+      printf("\nSSCAL Called\n");
+      #endif
+
+      memset(&kargs, 0, sizeof(kargs));
+      kargs.dtype = TYPE_FLOAT;
+      kargs.alpha.argFloat = alpha;
+
+      return doScal(&kargs, N, X, offx, incx, numCommandQueues, commandQueues, numEventsInWaitList, eventWaitList, events);
+   }
+
+   clblasStatus clblasDscal(...) 
+   ... 
+   clblasStatus clblasCscal(...) 
+   ... 
+   clblasStatus clblasZscal(...) 
+   ... 
+   ...
+   
+Step 1:  Declaration of new base functor classes 
+================================================
+
+All the SCAL variants have identical arguments so it is reasonable to 
+use a template to avoid rewriting similar classes again and again. 
+Using macros would also work. That is just a matter of personal taste.
+
+For convenience, the base template class will provide an internal
+structure type called Args that will be used to store the arguments.
+Using an Args type is not strictly needed but it greatly simplifies the
+creation of the functor classes and of their future derived classes.
+ 
+So create a new file src/library/blas/functor/include/functor_xscal.h 
+containing the base functor class. In that specific case we also have 
+to consider the case of clblasZdscal() and clblasCsscal(), which explains
+why the template requires two types TX and Talpha. TX is the type of 
+the vector elements while Talpha is the type of the alpha argument.
+ 
+      
+      
+      template<typename TX, typename Talpha> 
+      class clblasXscalFunctor : public clblasFunctor 
+      {
+      public:
+      
+        // Structure used to store all XSCAL arguments
+        struct Args
+        {
+          size_t           N;
+          Talpha           alpha;
+          cl_mem           X;
+          size_t           offx;
+          int              incx;
+          cl_command_queue queue;
+          cl_uint          numEventsInWaitList;
+          const cl_event * eventWaitList;
+          cl_event *       events;
+        
+          Args(size_t N,
+               Talpha alpha,
+               cl_mem X,
+               size_t offx,
+               int    incx,
+               cl_command_queue queue,
+               cl_uint          numEventsInWaitList,
+               const cl_event * eventWaitList,
+               cl_event *       events) 
+            : N(N),
+              alpha(alpha),
+              X(X),
+              offx(offx),
+              incx(incx),
+              queue(queue),
+              numEventsInWaitList(numEventsInWaitList),
+              eventWaitList(eventWaitList),
+              events(events)
+          {
+          }
+        };
+      
+        virtual clblasStatus execute(Args & args) = 0;
+      };
+      
+      
+Using this template class it is now possible to define the base functor
+class corresponding to each SCAL function:
+      
+      
+      class clblasSscalFunctor: public clblasXscalFunctor<cl_float, cl_float> 
+      {
+      };
+      //
+      // Base class for all functors providing a DSCAL implementation
+      //
+      class clblasDscalFunctor: public clblasXscalFunctor<cl_double, cl_double>
+      {
+      };
+      
+      //
+      // Base class for all functors providing a CSCAL implementation
+      //
+      class clblasCscalFunctor: public clblasXscalFunctor<cl_float2, cl_float2>
+      {
+      };
+      
+      //
+      // Base class for all functors providing a ZSCAL implementation
+      //
+      class clblasZscalFunctor: public clblasXscalFunctor<cl_double2, cl_double2>
+      {
+      };
+      
+      //
+      // Base class for all functors providing a CSSCAL implementation
+      //
+      class clblasCsscalFunctor: public clblasXscalFunctor<cl_float2, cl_float>
+      {
+      };
+      
+      //
+      // Base class for all functors providing a ZDSCAL implementation
+      //
+      class clblasZdscalFunctor: public clblasXscalFunctor<cl_double2, cl_double>
+      {
+      };
+      
+
+A shorter alternative could be to use 'typedef' instead but using classes
+offers the opportunity to extend the functor with specific features (i.e. 
+it is possible to add new members to a class but not to a typedef).
+
+
+
+STEP 2: Create the new fallback classes
+=======================================   
+
+In the following, we only consider the case of clblasSscal. 
+
+For each of the functor classes declared during STEP 1, we should now 
+declare the fallback functor class that will provide the Solver-based 
+implementation of the function. 
+
+We add the following to src/library/blas/functor/include/functor_xscal.h
+
+   //
+   // Fallback functor for SSCAL : implement the sscal using the old solver mechanism
+   //
+   class clblasSscalFunctorFallback : public clblasSscalFunctor 
+   {
+     public:   // Inherited members from clblasFunctor 
+      virtual void retain();  
+      virtual void release();
+     public:  // Inherited members from clblasSscalFunctor 
+      virtual clblasStatus execute(Args & a);
+     public:
+      static clblasSscalFunctorFallback * provide ();
+    };
+
+
+The file src/library/blas/xscal.c is then renamed to src/library/blas/functor/functor_xscal.cc
+and modified as follows:
+
+First, the clblasSscal() function is transformed into clblasSscalFunctorFallback::execute() 
+
+    clblasStatus clblasSscalFunctorFallback::execute(Args & args)
+    {
+    
+      CLBlasKargs kargs;
+    
+      memset(&kargs, 0, sizeof(kargs));
+      kargs.dtype = TYPE_FLOAT;
+      kargs.alpha.argFloat = args.alpha;
+    
+      return doScal(&kargs, 
+                    args.N, 
+                    args.X, 
+                    args.offx, 
+                    args.incx, 
+                    1,
+                    &args.queue, 
+                    args.numEventsInWaitList, 
+                    args.eventWaitList, 
+                    args.events);
+    
+    }
+    
+
+Second, a single instance of clblasSscalFunctorFallback is created as a static variable 
+that will be returned by the clblasSscalFunctorFallback::provide() member. 
+
+
+    clblasSscalFunctorFallback * clblasSscalFunctorFallback::provide ()
+    {
+      static clblasSscalFunctorFallback sscal_fallback;
+      return & sscal_fallback;
+    }
+
+
+Third, the retain() and release() members must be reimplemented to prevent the
+destruction of the unique clblasSscalFunctorFallback instance.
+
+   
+     void clblasSscalFunctorFallback::retain()
+     {
+       // clblasSscalFunctorFallback has a single global instance 
+       // and shall never be freed 
+     }
+
+
+     void clblasSscalFunctorFallback::release()
+     {
+       // clblasSscalFunctorFallback has a single global instance 
+       // and shall never be freed
+     }
+
+
+STEP 3: Add the appropriate members to the clblasFunctorSelector class
+=======================================================================   
+
+The clblasFunctorSelector shall typically be extended with two new virtual 
+methods: one to select a specific functor and one to select a generic functor. 
+
+Edit the file src/library/blas/functor/include/functor_selector.h and add
+the following member declarations to the class clblasFunctorSelector:
+
+      // Provide a XSCAL Functor usable in all cases 
+
+    virtual clblasSscalFunctor  * select_sscal_generic();
+    virtual clblasDscalFunctor  * select_dscal_generic();
+    virtual clblasCscalFunctor  * select_cscal_generic();
+    virtual clblasZscalFunctor  * select_zscal_generic();
+    virtual clblasCsscalFunctor * select_csscal_generic();
+    virtual clblasZdscalFunctor * select_zdscal_generic();
+
+    // Provide XSCAL functors optimized for specific arguments
+
+    virtual clblasSscalFunctor  * select_sscal_specific(clblasSscalFunctor::Args & args);
+    virtual clblasDscalFunctor  * select_dscal_specific(clblasDscalFunctor::Args & args);
+    virtual clblasCscalFunctor  * select_cscal_specific(clblasCscalFunctor::Args & args);
+    virtual clblasZscalFunctor  * select_zscal_specific(clblasZscalFunctor::Args & args);
+    virtual clblasCsscalFunctor * select_csscal_specific(clblasCsscalFunctor::Args & args);
+    virtual clblasZdscalFunctor * select_zdscal_specific(clblasZdscalFunctor::Args & args);
+
+The naming scheme used here is not mandatory but is recommended to keep the
+whole infrastructure consistent.
+
+Then, add their default implementation in src/library/blas/functor/functor_selector.cc.
+
+
+    clblasSscalFunctor * 
+    clblasFunctorSelector::select_sscal_generic()  
+    { 
+        return clblasSscalFunctorFallback::provide();
+    }  
+
+    clblasSscalFunctor * 
+    clblasFunctorSelector::select_sscal_specific(clblasSscalFunctor::Args &)
+    { 
+        return this->select_sscal_generic() ;
+    }
+
+    ...
+
+
+STEP 4: Modify the clBLAS function to use the functor
+=====================================================
+
+Create a file src/library/blas/xscal.cc to reimplement the clBLAS API functions. 
+
+First, copy the original function skeletons from the now obsolete file src/library/blas/xscal.c
+
+Then fill the skeleton to perform the following actions:
+  (A) Perform some consistency checks on the arguments      
+  (B) Create and initialize a local Args object  
+  (C) Obtain the clblasFunctorSelector corresponding 
+      to the current device (via the queue)
+  (D) Ask that selector for a specific functor
+  (E) Execute the functor
+  (F) Release the functor 
+ 
+The code shall typically look like this: 
+      
+      extern "C" 
+      clblasStatus
+      clblasSscal(
+          size_t N,
+          float alpha,
+          cl_mem X,
+          size_t offx,
+          int incx,
+          cl_uint numCommandQueues,
+          cl_command_queue *commandQueues,
+          cl_uint numEventsInWaitList,
+          const cl_event *eventWaitList,
+          cl_event *events)
+      {
+      
+        CHECK_VECTOR_X( X , N,  offx, incx ) ;
+        CHECK_QUEUES( numCommandQueues, commandQueues ) ;
+        CHECK_WAITLIST( numEventsInWaitList, eventWaitList ) ;
+
+        if ( numCommandQueues>1 ) {
+          numCommandQueues = 1 ;  // No support for multi-device (yet)
+        }
+      
+        cl_command_queue queue = commandQueues[0]; 
+      
+        clblasSscalFunctor::Args args(N,
+                                      alpha,
+                                      X,
+                                      offx,
+                                      incx,
+                                      queue,
+                                      numEventsInWaitList,
+                                      eventWaitList,
+                                      events);
+        
+         clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+         
+         clblasSscalFunctor * functor = fselector->select_sscal_specific(args);
+      
+         clblasStatus res = functor->execute(args);
+      
+         functor->release();
+      
+         return res;
+      }
+      
+      
+Reminder: this is a C++ file so the API functions shall be declared extern "C" 
+      
+Remark: what is missing in that example is a proper verification of the arguments
+        (e.g. numCommandQueues shall be strictly positive, commandQueues[0] shall
+         be non-NULL, ...) 
+
+
+
+Conclusion
+==========
+
+After following all the steps above, the clBLAS APIs shall now use the Solver-based
+implementation via their respective fallback functors.
+
+Other specialized functors can then be implemented and integrated into the  
+appropriate methods of the functor selector. 
+
+
+
diff --git a/doc/README-TransformASolverIntoAFunctor.txt b/doc/README-TransformASolverIntoAFunctor.txt
new file mode 100644
index 0000000..0535020
--- /dev/null
+++ b/doc/README-TransformASolverIntoAFunctor.txt
@@ -0,0 +1,382 @@
+S. Chauveau
+CAPS Entreprise
+clBLAS Project
+------------------------------
+April 30,2014
+
+This document describes a possible procedure to transform an existing 
+solver-based implementation for a given BLAS function into a functor-based
+implementation. 
+
+We assume here that the basic functor infrastructure is already implemented 
+for that function. 
+
+More precisely, we will consider the case of the family of the XSCAL functions
+for which this exercise was already performed. The resulting code can currently 
+be found in the files
+
+  - src/library/blas/functor/include/functor_xscal_generic.h
+  - src/library/blas/functor/functor_xscal_generic.cc
+
+So XSCAL consists of 6 functions all performing the scaling (i.e. multiplication) 
+of a vector X by a scalar alpha.  SSCAL, DSCAL, CSCAL and ZSCAL are respectively 
+for the float, double, complex float and complex double cases while CSSCAL and ZDSCAL 
+are special cases when the vector X is complex and the scalar alpha is not. 
+
+The file 'functor_xscal.h' defines a base functor type for each of
+those functions:
+
+  - clblasSscalFunctor
+  - clblasDscalFunctor
+  - clblasCscalFunctor
+  - clblasZscalFunctor
+  - clblasCsscalFunctor
+  - clblasZdscalFunctor
+
+Each of those base functor types defines a similar internal type Args that is used 
+to store the corresponding SCAL arguments (Talpha in the code sample below denotes 
+the type of the alpha argument). 
+ 
+  
+  struct Args
+  {
+    size_t           N;
+    Talpha           alpha;
+    cl_mem           X;
+    size_t           offx;
+    int              incx;
+    cl_command_queue queue;
+    cl_uint          numEventsInWaitList;
+    const cl_event * eventWaitList;
+    cl_event *       events;
+    ...
+  } 
+
+
+
+The OpenCL code used in the generic functor can be found in the file 
+  - src/library/blas/gens/clTemplates/scal.cl
+
+This file is not really an OpenCL program but a template that needs to
+be processed using the existing 'kprintf' API. We assume that the reader
+is already familiar with that API.
+
+Apart from the data type (float, double, ...) that template can also be 
+parametrized using two coefficients:
+  - a vector size 
+  - whether the X is properly aligned for the chosen vector size.
+
+Those coefficients (combined with the OpenCL context and device) will
+form what could be called the signature of the functors. Since all
+generic functors will use the same kind of signature, the file 
+'functor_xscal_generic.h' starts by defining a reusable POD (Plain 
+Old Data) type for it:
+
+  struct _clblasXscalFunctorGenericData 
+  {
+    int  vecLen  ;  // Vectorization size
+    bool doVLOAD ;  // if aligned vector load/store can be used 
+  
+    //
+    // The operator < is needed for the cache 
+    //
+    bool operator<(const _clblasXscalFunctorGenericData &b) const 
+    {
+      const  _clblasXscalFunctorGenericData &a = *this ;    
+      if ( a.vecLen  != b.vecLen  ) return a.vecLen  < b.vecLen  ;
+      if ( a.doVLOAD != b.doVLOAD ) return a.doVLOAD < b.doVLOAD ;
+      return false ;
+    }  
+  } ;
+  
+This type will later be used as a key in the functor caches so it is 
+given the 'operator<' implementation needed to use it as a key in
+the class clblasFunctorCache.  
+
+Next, the file 'functor_xscal_generic.h' provides the declaration of the
+functors. For SSCAL that is the class clblasSscalFunctorGeneric,
+defined as follows:  
+
+ 
+    class clblasSscalFunctorGeneric : public clblasSscalFunctor 
+    { 
+      public: 
+        typedef _clblasXscalFunctorGenericData Data ;  
+        Data data;
+      public:  // Constructor & Destructor
+        clblasSscalFunctorGeneric(cl_context ctxt, cl_device_id dev, const Data & data, cl_int & err);
+        ~clblasSscalFunctorGeneric();
+      public:  // Inherited members from clblasSscalFunctor 
+        virtual clblasStatus execute(Args & a);
+      public:
+        static clblasSscalFunctorGeneric * provide (Args & a);
+      public:
+        typedef clblasFunctorCache<clblasSscalFunctorGeneric, Data> Cache;
+        static Cache cache;
+      public:  
+        cl_program program;
+    };
+
+You should recognize here the execute() method that has to be
+implemented by all implementations of clblasSscalFunctor and the
+provide() method that will be used in place of the constructor to
+ensure that the functor is properly cached.
+
+A static cache is also provided using _clblasXscalFunctorGenericData (or its local 
+version Data) as the custom key. 
+
+Ideally the constructor shall be private to prevent using it directly
+but for technical reasons (i.e. the use of templates to factorize the
+implementation of 'provide') it had to be made public.
+
+Each functor also carries a Data member and a cl_program member that will be 
+used by the execute function.
+
+The other functors are implemented in a very similar way. In fact, the
+6 functor classes defined in this file are almost identical except for
+minor details. Their implementation in 'functor_xscal_generic.cc'
+will make extensive use of templates to avoid rewriting too much
+code. Another approach that requires some minor but not negligible
+architectural changes is possible and described in Appendix A below.
+
+
+The 'execute' method is implemented in functor_xscal_generic.cc 
+using the static templated function 'xscalExecute':
+
+
+    clblasStatus clblasSscalFunctorGeneric::execute(Args & args)
+    {
+      size_t nThreads = args.N; // to customize according to the device, data and args
+      return xscalExecute<cl_float>(args.queue, 
+                                    this->program, 
+                                    "Sscal_kernel",
+                                    args.alpha, 
+                                    args.X, 
+                                    args.N, 
+                                    args.offx, 
+                                    args.incx,
+                                    nThreads);
+    }
+    
+The last argument of xscalExecute represents the number of threads. In
+that version it is simply set to args.N which is functionally correct
+but clearly not optimal.  A more complex formula depending on the
+architecture and on the Data is clearly needed here.
+
+
+The template type cl_float is the type of alpha, which strictly
+speaking is not mandatory in this case (because it can be inferred from
+the argument).  Apart from the arguments, the command queue and the
+OpenCL program, each functor also passes its own kernel name,
+which is given here as the third argument. 
+
+The implementation of xscalExecute() is a typical OpenCL kernel call:
+
+    template <typename TA>
+    static clblasStatus xscalExecute(cl_command_queue queue, 
+                                     cl_program program, 
+                                     const char * kernelName, 
+                                     TA alpha, 
+                                     cl_mem X, 
+                                     uint N, 
+                                     uint offx, 
+                                     int incx,
+                                     size_t nThreads)
+    {
+      cl_int err;
+      cl_kernel kernel = clCreateKernel( program, kernelName,  &err);
+      if (err != CL_SUCCESS) return clblasStatus(err) ; 
+    
+      clblasFunctor::setKernelArg<TA>     (kernel, 0, alpha);
+      clblasFunctor::setKernelArg<cl_mem> (kernel, 1, X);
+      clblasFunctor::setKernelArg<uint>   (kernel, 2, N);
+      clblasFunctor::setKernelArg<uint>   (kernel, 3, offx);
+      clblasFunctor::setKernelArg<int>    (kernel, 4, incx);
+      
+      size_t globalThreads[1] = { nThreads };
+    
+      cl_event event;
+      err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL,
+                                   globalThreads, NULL , 
+                                   0, NULL, &event);
+    
+      clReleaseKernel(kernel) ;
+      return clblasStatus(err) ;
+    }
+    
+The functor constructor is implemented in a similar way using the templated
+static function xcalCreateProgram: 
+
+     
+     template <class FUNCTOR>
+     static cl_program xcalCreateProgram(cl_context ctxt, 
+                                         cl_device_id dev,
+                                         char type,
+                                         const char* functorName,
+                                         const typename FUNCTOR::Data & data,
+                                         cl_int & err)
+     {
+       BinaryLookup bl(ctxt, dev, functorName);
+     
+       bl.variantInt(data.vecLen);
+       bl.variantInt(data.doVLOAD);
+     
+       if ( bl.found() ) // may create empty file or may wait until file is ready  
+       {
+         return bl.getProgram();
+       }
+       else
+       {
+         char tempTemplate[32*1024];
+         char buf         [32*1024];
+         cl_program scalProgram;
+     
+         strcpy( tempTemplate, (char*)scal_kernel );
+         kprintf kobj( type, data.vecLen, data.doVLOAD, data.doVLOAD);
+         kobj.spit((char*)buf, tempTemplate);
+     
+         scalProgram = BinaryLookup::buildProgramFromSource(buf, ctxt, dev, err /*, options*/);
+         
+         if(scalProgram)
+         {
+           bl.setProgram(scalProgram);
+           bl.populateCache();
+         }
+         
+         return scalProgram;
+       }
+     
+     }
+
+     clblasSscalFunctorGeneric::clblasSscalFunctorGeneric(cl_context ctxt, 
+                                                          cl_device_id dev,
+                                                          const Data & data,
+                                                          cl_int & err) : program(0)
+     {
+       this->program = xcalCreateProgram<clblasSscalFunctorGeneric>(
+              ctxt, dev,  'S', "clblasSscalFunctorGeneric", data, err
+             );
+     }
+
+We recognize here a typical use of the BinaryLookup class used to manage the 
+binary cache on disk when enabled. The query effectively occurs during the 
+call to 'bl.found()'. Before that, the members of the Data structure, which is 
+also used as the key in this cache, are added to the lookup object. It is very 
+important not to forget any member, otherwise binary cache entries could be reused 
+for incompatible functors. The functorName argument is also used to index 
+the cache entries. It shall be unique so the functor class name is used here.
+
+After the call to bl.found(), two cases are to be
+considered. If a compatible entry was found in the binary cache,
+a proper cl_program can be obtained with bl.getProgram(). Otherwise, the
+program shall be built manually using the kprintf and the
+BinaryLookup::buildProgramFromSource utility functions. The resulting
+program shall then be stored in the binary cache for further reuse.
+
+
+Last but not least, the 'provide' member is also implemented using a 
+templated call: 
+
+  
+     
+     template <class FUNCTOR>
+     static FUNCTOR * xscalProvide(typename FUNCTOR::Args & args)
+     {
+       cl_device_id dev;
+       cl_context   ctxt;
+       cl_int err = clblasFunctor::getDeviceAndContext(args.queue, dev, ctxt);
+     
+       if (err != CL_SUCCESS)
+       {
+           return NULL;
+       }
+     
+       uint vecLen  = 1 ;      // To customize according to the device and args
+       uint doVLOAD = false ;  // TO DO (see scal_reg.cpp)
+     
+       typename FUNCTOR::Data data = { vecLen , doVLOAD };
+     
+       typename FUNCTOR::Cache::Lookup lookup(FUNCTOR::cache, ctxt, dev, data ) ;
+     
+       if ( lookup.ok() ){
+         FUNCTOR * functor = lookup.get();
+         functor->retain(); 
+         return functor;
+       }
+      
+       FUNCTOR * functor = new FUNCTOR(ctxt, dev, data, err);
+       if (err != CL_SUCCESS)
+       {
+           return NULL;
+       }
+     
+       lookup.set(functor) ;
+     
+       return functor;
+     }
+     
+     clblasSscalFunctorGeneric * 
+     clblasSscalFunctorGeneric::provide (clblasSscalFunctor::Args & args)
+     {
+       return xscalProvide<clblasSscalFunctorGeneric>(args);
+     }
+
+
+This implementation of xscalProvide is pretty basic. After extracting
+the device and context from the queue (which would probably be better
+done earlier), the coefficients of the Data structure are chosen. In 
+this early implementation, a vector size of 1 is used which is safe 
+but not optimal. What is still missing is a set of utility functions to 
+help analyze the properties of the vector argument X in order to figure 
+out the best choice for the vectorization. This code probably exists 
+somewhere in the current Solver implementation but still needs to be 
+provided for the functor.
+
+Once the Data structure is populated, a lookup in the private 
+cache of this functor can be performed. As with the binary cache, 
+we have to differentiate the cases where a cache entry already 
+exists or not. In the latter case, the functor must be created manually. 
+
+
+
+
+
+Appendix A - Alternative approach to improve code reuse
+=======================================================
+
+
+The implementation described above suffers from a significant problem
+due to a variant of the so-called diamond problem of C++ (and most
+Object-Oriented programming languages). Simply speaking, the technical
+choice made to create one base class for each functor of the SCAL family 
+implies that it becomes very difficult to share code between the  
+classes derived from those base classes.
+
+                          clblasFunctor 
+                      /          |           \  
+                     /           |            \
+     clblasSscalFunctor   clblasDscalFunctor  ...
+           |                     |
+           |                     | 
+ clblasSscalFunctorGeneric  clblasDscalFunctorGeneric   ... 
+
+
+The problem is partially solved by introducing some templated functions 
+but this is not a very elegant solution. 
+
+An alternative could be to define only one base class for all functors of
+the SCAL family and then to define only one 'generic' functor class
+(and one fallback class).
+
+The immediate advantage is that the number of functor classes to be
+written would be greatly reduced (typically by a factor of 4 for most BLAS
+functions). The disadvantage is that the implementation of those
+classes would be slightly more complex since they would have to manage
+all their variants at once. It is not too late to switch to that new
+design as long as not too many functors have been written. This is a minor 
+change to the overall functor design. 
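+
+To make that alternative more concrete, here is a purely hypothetical sketch
+(none of the names below exist in the current code; the element-type tag is
+assumed to reuse the DataType values already used with CLBlasKargs):
+
+  // Hypothetical single base class for the whole SCAL family.
+  class clblasScalFamilyFunctor : public clblasFunctor
+  {
+  public:
+    struct Args
+    {
+      DataType         dtype;      // element type of X
+      DataType         alphaType;  // type of alpha (may differ, as for CSSCAL/ZDSCAL)
+      size_t           N;
+      const void *     alpha;      // interpreted according to alphaType
+      cl_mem           X;
+      size_t           offx;
+      int              incx;
+      cl_command_queue queue;
+      // ... plus the usual event-related members ...
+    };
+
+    virtual clblasStatus execute(Args & args) = 0;
+  };
+
+  // A single 'generic' class and a single fallback class would then derive
+  // from it, each handling all the type variants internally.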
+
+
+
+
+
+
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 126ce28..8068598 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -25,6 +25,54 @@ option( BUILD_CLIENT "Build a command line clBLAS client program with a variety
 option( BUILD_KTEST "A command line tool for testing single clBLAS kernel" ON )
 option( BUILD_SHARED_LIBS "Build shared libraries" ON )
 
+# Enable or disable offline compilation for different devices. Currently only Hawaii, Bonaire and Tahiti have this option.
+option( OCL_OFFLINE_BUILD_HAWAII_KERNEL "Offline compile the OpenCL kernels for Hawaii device" OFF)
+option( OCL_OFFLINE_BUILD_BONAIRE_KERNEL "Offline compile the OpenCL kernels for Bonaire device" OFF)
+option( OCL_OFFLINE_BUILD_TAHITI_KERNEL "Offline compile the OpenCL kernels for Tahiti device" OFF)
+
+if( (OCL_OFFLINE_BUILD_HAWAII_KERNEL AND OCL_OFFLINE_BUILD_BONAIRE_KERNEL) OR (OCL_OFFLINE_BUILD_HAWAII_KERNEL AND OCL_OFFLINE_BUILD_TAHITI_KERNEL) OR (OCL_OFFLINE_BUILD_BONAIRE_KERNEL AND OCL_OFFLINE_BUILD_TAHITI_KERNEL))
+   MESSAGE( WARNING "More than one device is chosen for offline compilation of static kernels. This might result in running out of heap memory with certain drivers. Please consider offline compilation for ONE device only." )
+endif( )
+
+if( NOT OCL_OFFLINE_BUILD_HAWAII_KERNEL )
+  #use dynamic generated kernels
+  MESSAGE(STATUS "Build dynamic Hawaii kernels.")
+  MESSAGE(STATUS "Check OCL_OFFLINE_BUILD_HAWAII_KERNEL to build kernls at compile-time. This will eliminates clBuildProgram() overhead and better kernel performance with certain driver.")
+  add_definitions(-DCLBLAS_HAWAII_DYNAMIC_KERNEL) 
+else()
+  MESSAGE(STATUS "Build static Hawaii kernels.")
+  MESSAGE(STATUS "Uncheck OCL_OFFLINE_BUILD_HAWAII_KERNEL to build kernls at run-time")
+  MESSAGE(STATUS "Please ensure the presence of Hawaii device in the system. With certain driver/compiler flags, this might result in compile-time error.")  
+endif( )
+
+if( NOT OCL_OFFLINE_BUILD_BONAIRE_KERNEL )
+  #use dynamic generated kernels
+  MESSAGE(STATUS "Build dynamic Bonaire kernels.")
+  MESSAGE(STATUS "Check OCL_OFFLINE_BUILD_BONAIRE_KERNEL to build kernls at compile-time. This will eliminates clBuildProgram() overhead and better kernel performance with certain driver.")
+  add_definitions(-DCLBLAS_BONAIRE_DYNAMIC_KERNEL) 
+else()
+  MESSAGE(STATUS "Build static Bonaire kernels.")
+  MESSAGE(STATUS "Uncheck OCL_OFFLINE_BUILD_BONAIRE_KERNEL to build kernls at run-time")
+  MESSAGE(STATUS "Please ensure the presence of Bonaire device in the system. With certain driver/compiler flags, this might result in compile-time error.")    
+endif( )
+
+if( NOT OCL_OFFLINE_BUILD_TAHITI_KERNEL )
+  #use dynamic generated kernels
+  MESSAGE(STATUS "Build dynamic Tahiti kernels.")
+  MESSAGE(STATUS "Check OCL_OFFLINE_BUILD_TAHITI_KERNEL to build kernls at compile-time. This will eliminates clBuildProgram() overhead and better kernel performance with certain driver.")
+  add_definitions(-DCLBLAS_TAHITI_DYNAMIC_KERNEL) 
+else( )
+  MESSAGE(STATUS "Build static Tahiti kernels.")
+  MESSAGE(STATUS "Uncheck OCL_OFFLINE_BUILD_TAHITI_KERNEL to build kernls at run-time")
+  MESSAGE(STATUS "Please ensure the presence of Tahiti device in the system. With certain driver/compiler flags, this might result in compile-time error.")    
+endif( )
+
+
+# Ask the user to verify the compiler version. If OpenCL 2.0 is supported, certain public flags can be used.
+set( OCL_VERSION "1.2" CACHE STRING "The version of OpenCL supported by your driver/device" )
+set_property( CACHE OCL_VERSION PROPERTY STRINGS 2.0 1.2 1.1 )
+message( STATUS "You have confirmed OpenCL ${OCL_VERSION} is supported in your system" )
+
 # By default test-correctness is linked and tested against ACML library.
 # However, test-correctness can instead use NETLIB as a reference library
 set(CORR_TEST_WITH_ACML ON CACHE BOOL "Use ACML library in correctness tests")
@@ -117,6 +165,20 @@ if( UNIX )
     set(MATH_LIBRARY "m")
 endif()
 
+# set the path to specific OpenCL compiler
+set( OCL_COMPILER_DIR "OPENCL COMPILER PATH" CACHE PATH "OPENCL COMPILER PATH")
+if ( ${OCL_COMPILER_DIR} STREQUAL "OPENCL COMPILER PATH")
+    message( STATUS "Use default OpenCL Compiler")
+	  set(ENV_PATH "$ENV{PATH}")
+else ()
+    message( STATUS "OPENCL COMPILER: ${OCL_COMPILER_DIR}")
+	if(UNIX)
+	  set(ENV_PATH "${OCL_COMPILER_DIR}")
+	else()
+	  set(ENV_PATH "${OCL_COMPILER_DIR}")
+	endif()
+endif()
+
 # Find the BLAS library
 # TODO: maybe this could be written using the FindBLAS module in the future
 if( BUILD_TEST )
@@ -268,6 +330,7 @@ if( BUILD_PERFORMANCE AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/scripts/perf" )
 endif( )
 
 if( BUILD_RUNTIME AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/library" )
+#	add_subdirectory( library/tools/bingen )
 	add_subdirectory( library )
 	add_subdirectory( library/tools/tune )
 	if( BUILD_KTEST )
diff --git a/src/clBLAS.def b/src/clBLAS.def
index 0a9f9b6..0a3d8fb 100644
--- a/src/clBLAS.def
+++ b/src/clBLAS.def
@@ -225,3 +225,31 @@ EXPORTS
 	
 	clblasAddScratchImage
 	clblasRemoveScratchImage
+
+	clblasMatrixSizeInfo
+	clblasCreateMatrix
+	clblasCreateMatrixWithLd
+	clblasCreateMatrixFromHost
+	clblasWriteSubMatrix
+	clblasWriteSubMatrixAsync
+	clblasReadSubMatrix
+	clblasReadSubMatrixAsync
+	clblasCopySubMatrix
+	clblasCopySubMatrixAsync
+	clblasWriteVector
+	clblasWriteVectorAsync
+	clblasReadVector
+	clblasReadVectorAsync
+	clblasCopyVector
+	clblasCopyVectorAsync
+	clblasWriteMatrix
+	clblasWriteMatrixAsync
+	clblasReadMatrix
+	clblasReadMatrixAsync
+	clblasCopyMatrix
+	clblasCopyMatrixAsync
+	clblasFillVector
+	clblasFillVectorAsync
+	clblasFillMatrix
+	clblasFillSubMatrix
+	clblasFillSubMatrixAsync
diff --git a/src/clBLAS.h b/src/clBLAS.h
index 7d89b9f..fef80a4 100644
--- a/src/clBLAS.h
+++ b/src/clBLAS.h
@@ -9644,6 +9644,628 @@ clblasZher2k(
     cl_event *events);
 /*@}*/
 
+/**
+ * @brief Helper function to compute leading dimension and size of a matrix
+ *
+ * @param[in] order	matrix ordering
+ * @param[in] rows	number of rows
+ * @param[in] columns	number of columns
+ * @param[in] elemsize	element size
+ * @param[in] padding	additional padding on the leading dimension
+ * @param[out] ld	if non-NULL *ld is filled with the leading dimension  
+ *			in elements
+ * @param[out] fullsize	if non-NULL *fullsize is filled with the byte size
+ *
+ * @return
+ *   - \b clblasSuccess for success
+ *   - \b clblasInvalidValue if:
+ *	 - \b elemsize is 0
+ *	 - \b rows and \b columns are both equal to 0
+ */
+clblasStatus clblasMatrixSizeInfo(
+	clblasOrder order,
+	size_t rows,
+	size_t columns, 
+	size_t elemsize,
+	size_t padding, 
+	size_t * ld, 
+	size_t * fullsize);
+
+/**
+ * @brief Allocates matrix on device and computes ld and size
+ *
+ * @param[in] context	OpenCL context
+ * @param[in] order	Row/column order.
+ * @param[in] rows	number of rows
+ * @param[in] columns	number of columns
+ * @param[in] elemsize	element size
+ * @param[in] padding	additional padding on the leading dimension
+ * @param[out] ld	if non-NULL *ld is filled with the leading dimension  
+ *			in elements
+ * @param[out] fullsize	if non-NULL *fullsize is filled with the byte size
+ * @param[out] err	Error code (see \b clCreateBuffer() )
+ * 
+ * @return
+ *   - OpenCL memory object of the allocated matrix
+ */
+cl_mem clblasCreateMatrix(
+	cl_context context,
+	clblasOrder order,
+	size_t rows,
+	size_t columns,
+	size_t elemsize,
+	size_t padding,
+	size_t * ld,
+	size_t * fullsize,
+	cl_int * err);
+
+
+/**
+ * @brief Allocates matrix on device with specified size and ld and computes its size
+ *
+ * @param[in] context	OpenCL context
+ * @param[in] order	Row/column order.
+ * @param[in] rows	number of rows
+ * @param[in] columns	number of columns 
+ * @param[in] elemsize	element size
+ * @param[in] padding	additional padding on the leading dimension
+ * @param[in] ld	the leading dimension in elements. It cannot 
+ *                      be less than \b columns when the \b order parameter is set to
+ *                      \b clblasRowMajor,\n or less than \b rows when the
+ *                      parameter is set to \b clblasColumnMajor.
+ * @param[out] fullsize	if non-NULL *fullsize is filled with the byte size
+ * @param[out] err	Error code (see \b clCreateBuffer() )
+ * 
+ * @return
+ *   - OpenCL memory object of the allocated matrix
+ */
+cl_mem clblasCreateMatrixWithLd( cl_context context,
+                                 clblasOrder order,
+                                 size_t rows,
+                                 size_t columns,
+                                 size_t elemsize,
+                                 size_t ld,
+                                 size_t * fullsize,
+                                 cl_int * err) ;
+
+
+/**
+ * @brief Allocates a matrix on the device and initializes it from an existing
+ *	  similar matrix on the host. See \b clblasCreateMatrixBuffer().
+ *
+ * @param[in] ld	leading dimension in elements
+ * @param[in] host 	base address of host matrix data
+ * @param[in] off_host 	host matrix offset in elements
+ * @param[in] ld_host 	leading dimension of host matrix in elements
+ * @param[in] command_queue 		specifies the OpenCL queue
+ * @param[in] numEventsInWaitList 	specifies the number of OpenCL events 
+ *	   	    		        to wait for
+ * @param[in] eventWaitList 		specifies the list of OpenCL events to 
+ *					wait for
+ *					
+ * @return
+ *   - OpenCL memory object of the allocated matrix
+ */
+cl_mem clblasCreateMatrixFromHost(
+	cl_context context, 
+	clblasOrder order,
+	size_t rows,
+	size_t columns, 
+	size_t elemsize,
+	size_t ld,
+	void * host,
+	size_t off_host, 
+	size_t ld_host,
+  cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_int * err);
+
+/**
+ * @brief Copies synchronously a sub-matrix from host (A) to device (B).
+ * 
+ * @param[in] order			matrix ordering
+ * @param[in] element_size		element size
+ * @param[in] A				specifies the source matrix on the host
+ * @param[in] offA			specifies the offset of matrix A in 
+ *					elements
+ * @param[in] ldA			specifies the leading dimension of 
+ * 					matrix A in elements
+ * @param[in] nrA			specifies the number of rows of A 
+ *					in elements
+ * @param[in] ncA			specifies the number of columns of A 
+ *					in elements
+ * @param[in] xA			specifies the top-left x position to 
+ * 					copy from A
+ * @param[in] yA			specifies the top-left y position to 
+ * 					copy from A
+ * @param[in] B				specifies the destination matrix on the 
+ *					device
+ * @param[in] offB			specifies the offset of matrix B in 
+ *					elements
+ * @param[in] ldB 			specifies the leading dimension of 
+ * 					matrix B in bytes
+ * @param[in] nrB 			specifies the number of rows of B 
+ *					in elements
+ * @param[in] ncB 			specifies the number of columns of B 
+ *					in elements
+ * @param[in] xB 			specifies the top-left x position to 
+ *					copy from B
+ * @param[in] yB 			specifies the top-left y position to 
+ *					copy from B
+ * @param[in] nx 			specifies the number of elements to 
+ *					copy according to the x dimension (rows)
+ * @param[in] ny 			specifies the number of elements to 
+ *					copy according to the y dimension 
+ *					(columns)
+ * @param[in] command_queue 		specifies the OpenCL queue
+ * @param[in] numEventsInWaitList 	specifies the number of OpenCL events 
+ *	   	    		        to wait for
+ * @param[in] eventWaitList 		specifies the list of OpenCL events to 
+ *					wait for
+ *
+ * @return
+ *   - \b clblasSuccess for success
+ *   - \b clblasInvalidValue if:
+ *	- \b xA + \b offA + \b nx is greater than the number of columns of A
+ *      - \b xB + \b offB + \b nx is greater than the number of columns of B
+ *      - \b yA + \b ny is greater than the number of rows of A
+ *      - \b yB + \b ny is greater than the number of rows of B
+ */
+clblasStatus clblasWriteSubMatrix(
+	clblasOrder order,
+	size_t element_size,
+	const void *A, size_t offA, size_t ldA,
+	size_t nrA, size_t ncA,
+	size_t xA, size_t yA,
+	cl_mem B, size_t offB, size_t ldB,
+	size_t nrB, size_t ncB,
+	size_t xB, size_t yB,
+	size_t nx, size_t ny,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
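+
+/*
+ * Illustrative sketch (not part of the API): copy a 64 x 32 tile starting at
+ * position (2,3) of a 128 x 96 column-major host matrix "hostA" into position
+ * (0,0) of a 64 x 32 device matrix "devB". The dimensions are made-up values
+ * and "queue" is assumed to be a valid command queue.
+ *
+ *   clblasStatus st = clblasWriteSubMatrix(
+ *       clblasColumnMajor, sizeof(cl_float),
+ *       hostA, 0, 128,   // host matrix, offset, leading dimension
+ *       128, 96,         // host matrix size
+ *       2, 3,            // top-left corner of the tile in A
+ *       devB, 0, 64,     // device matrix, offset, leading dimension
+ *       64, 32,          // device matrix size
+ *       0, 0,            // destination corner in B
+ *       64, 32,          // tile size (nx x ny)
+ *       queue, 0, NULL );
+ */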
+
+/**
+ * @brief Copies asynchronously a sub-matrix from host (A) to device (B). 
+ *	  See \b clblasWriteSubMatrix().
+ *
+ * @param[out] event 	Event objects per each command queue that identify a 
+ *			particular kernel execution instance.
+ */
+clblasStatus clblasWriteSubMatrixAsync(
+	clblasOrder order,
+	size_t element_size,
+	const void *A, size_t offA, size_t ldA,
+	size_t nrA, size_t ncA,
+	size_t xA, size_t yA,
+	cl_mem B, size_t offB, size_t ldB,
+	size_t nrB, size_t ncB,
+	size_t xB, size_t yB,
+	size_t nx, size_t ny,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *event);
+
+/**
+ * @brief Copies a sub-matrix from device (A) to host (B). 
+ *	  See \b clblasWriteSubMatrix().
+ * 
+ * @param[in] A		specifies the source matrix on the device
+ * @param[in] B		specifies the destination matrix on the host
+ *
+ * @return
+ *   - see \b clblasWriteSubMatrix()
+ */
+clblasStatus clblasReadSubMatrix(
+	clblasOrder order,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	size_t nrA, size_t ncA,
+	size_t xA, size_t yA,
+	void *B, size_t offB, size_t ldB,
+	size_t nrB, size_t ncB,
+	size_t xB, size_t yB,
+	size_t nx, size_t ny,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a sub-matrix from device (A) to host (B). 
+ * 	  See \b clblasReadSubMatrix() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasReadSubMatrixAsync(
+	clblasOrder order,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	size_t nrA, size_t ncA,
+	size_t xA, size_t yA,
+	void *B, size_t offB, size_t ldB,
+	size_t nrB, size_t ncB,
+	size_t xB, size_t yB,
+	size_t nx, size_t ny,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *event);
+
+/**
+ * @brief Copies a sub-matrix from device (A) to device (B). 
+ *	  See \b clblasWriteSubMatrix().
+ * 
+ * @param[in] A		specifies the source matrix on the device
+ * @param[in] B		specifies the destination matrix on the device
+ *
+ * @return
+ *   - see \b clblasWriteSubMatrix()
+ */
+clblasStatus clblasCopySubMatrix(
+	clblasOrder order,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	size_t nrA, size_t ncA,
+	size_t xA, size_t yA,
+	cl_mem B, size_t offB, size_t ldB,
+	size_t nrB, size_t ncB,
+	size_t xB, size_t yB,
+	size_t nx, size_t ny,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a sub-matrix from device (A) to device (B). 
+ *        See \b clblasCopySubMatrix() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasCopySubMatrixAsync(
+	clblasOrder order,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	size_t nrA, size_t ncA,
+	size_t xA, size_t yA,
+	cl_mem B, size_t offB, size_t ldB,
+	size_t nrB, size_t ncB,
+	size_t xB, size_t yB,
+	size_t nx, size_t ny,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *event);
+
+/**
+ * @brief Copies synchronously a vector from host (A) to device (B). 
+ *	  See \b clblasWriteSubMatrix().
+ * 
+ * @param[in] A		specifies the source vector on the host
+ * @param[in] B		specifies the destination vector on the device
+ *
+ * @return
+ *   - see \b clblasWriteSubMatrix()
+ */
+clblasStatus clblasWriteVector(
+	size_t nb_elem,
+	size_t element_size,
+	const void *A, size_t offA,
+	cl_mem B, size_t offB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a vector from host (A) to device (B). 
+ * 	  See \b clblasWriteVector() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasWriteVectorAsync(
+	size_t nb_elem,
+	size_t element_size,
+	const void *A, size_t offA,
+	cl_mem B, size_t offB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *events);
+
+/**
+ * @brief Copies synchronously a vector from device (A) to host (B). 
+ *	  See \b clblasReadSubMatrix().
+ * 
+ * @param[in] A		specifies the source vector on the device
+ * @param[in] B		specifies the destination vector on the host
+ *
+ * @return
+ *   - see \b clblasReadSubMatrix()
+ */
+clblasStatus clblasReadVector(
+	size_t nb_elem,
+	size_t element_size,
+	const cl_mem A, size_t offA,
+	void * B, size_t offB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a vector from device (A) to host (B). 
+ * 	  See \b clblasReadVector() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasReadVectorAsync(
+	size_t nb_elem,
+	size_t element_size,
+	const cl_mem A, size_t offA,
+	void * B, size_t offB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *events);
+
+/**
+ * @brief Copies synchronously a vector from device (A) to device (B). 
+ *	  See \b clblasCopySubMatrix().
+ * 
+ * @param[in] A		specifies the source vector on the device
+ * @param[in] B		specifies the destination vector on the device
+ *
+ * @return
+ *   - see \b clblasCopySubMatrix()
+ */
+clblasStatus clblasCopyVector(
+	size_t nb_elem,
+	size_t element_size,
+	const cl_mem A, size_t offA,
+	cl_mem B, size_t offB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a vector from device (A) to device (B). 
+ * 	  See \b clblasCopyVector() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasCopyVectorAsync(
+	size_t nb_elem,
+	size_t element_size,
+	const cl_mem A, size_t offA,
+	cl_mem B, size_t offB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *events);
+
+/**
+ * @brief Copies synchronously a whole matrix from host (A) to device (B). 
+ *        See \b clblasWriteSubMatrix().
+ * 
+ * @param[in] A		specifies the source matrix on the host
+ * @param[in] B		specifies the destination matrix on the device
+ *
+ * @return
+ *   - see \b clblasWriteSubMatrix()
+ */
+clblasStatus clblasWriteMatrix(
+	clblasOrder order,
+	size_t sx, size_t sy,
+	size_t element_size,
+	const void *A, size_t offA, size_t ldA,
+	cl_mem B, size_t offB, size_t ldB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a whole matrix from host (A) to device (B). 
+ *        See \b clblasWriteMatrix() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasWriteMatrixAsync(
+	clblasOrder order,
+	size_t sx, size_t sy,
+	size_t element_size,
+	const void *A, size_t offA, size_t ldA,
+	cl_mem B, size_t offB, size_t ldB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *events);
+
+/**
+ * @brief Copies synchronously a whole matrix from device (A) to host (B). 
+ *	  See \b clblasReadSubMatrix().
+ * 
+ * @param[in] A		specifies the source matrix on the device
+ * @param[in] B		specifies the destination matrix on the host
+ *
+ * @return
+ *   - see \b clblasReadSubMatrix()
+ */
+clblasStatus clblasReadMatrix(
+	clblasOrder order,
+	size_t sx, size_t sy,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	void * B, size_t offB, size_t ldB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a whole matrix from device (A) to host (B). 
+ *        See \b clblasReadMatrix() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasReadMatrixAsync(
+	clblasOrder order,
+	size_t sx, size_t sy,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	void * B, size_t offB, size_t ldB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *events);
+
+/**
+ * @brief Copies synchronously a whole matrix from device (A) to device (B). 
+ *	  See \b clblasCopySubMatrix().
+ * 
+ * @param[in] A		specifies the source matrix on the device
+ * @param[in] B		specifies the destination matrix on the device
+ *
+ * @return
+ *   - see \b clblasCopySubMatrix()
+ */
+clblasStatus clblasCopyMatrix(
+	clblasOrder order,
+	size_t sx, size_t sy,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	cl_mem B, size_t offB, size_t ldB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList);
+
+/**
+ * @brief Copies asynchronously a whole matrix from device (A) to device (B). 
+ *        See \b clblasCopyMatrix() and \b clblasWriteSubMatrixAsync().
+ */
+clblasStatus clblasCopyMatrixAsync(
+	clblasOrder order,
+	size_t sx, size_t sy,
+	size_t element_size,
+	const cl_mem A, size_t offA, size_t ldA,
+	cl_mem B, size_t offB, size_t ldB,
+	cl_command_queue command_queue,
+	cl_uint numEventsInWaitList,
+	const cl_event *eventWaitList,
+	cl_event *events);
+
+/**
+ * @brief Fills synchronously a vector with a pattern of size \b element_size bytes
+ * 
+ * @param[in] nb_elem             specifies the number of elements in buffer A
+ * @param[in] element_size        specifies the size of one element of A. Supported sizes correspond 
+ *                                to the element sizes used in clBLAS (1, 2, 4, 8 or 16)
+ * @param[in] A		          specifies the vector on the device to fill
+ * @param[in] offA                specifies the offset of vector A in 
+ *				  elements
+ * @param[in] host                specifies the host address of the pattern to fill with (\b element_size bytes)
+ * @param[in] command_queue 	  specifies the OpenCL queue
+ * @param[in] numEventsInWaitList specifies the number of OpenCL events 
+ *	   	    		  to wait for
+ * @param[in] eventWaitList 	  specifies the list of OpenCL events to 
+ *				  wait for
+ * @return
+ *   - see \b clblasWriteSubMatrix()
+ */
+clblasStatus clblasFillVector(
+     size_t nb_elem,
+     size_t element_size,
+     cl_mem A, size_t offA,
+     const void * host,
+     cl_command_queue command_queue,
+     cl_uint numEventsInWaitList,
+     const cl_event *eventWaitList);
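+
+/*
+ * Illustrative sketch (not part of the API): set the first n elements of a
+ * device buffer "devX" to the double value 1.0. "n", "devX" and "queue" are
+ * assumed to be defined by the caller.
+ *
+ *   cl_double one = 1.0;
+ *   clblasStatus st = clblasFillVector( n, sizeof(cl_double),
+ *                                       devX, 0, &one,
+ *                                       queue, 0, NULL );
+ */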
+
+/**
+ * @brief Fill asynchronously a vector with a pattern of a size element_size_bytes
+ *	  See \b clblasFillVector().
+ */
+clblasStatus clblasFillVectorAsync(
+     size_t nb_elem,
+     size_t element_size,
+     cl_mem A, size_t offA,
+     const void * pattern,
+     cl_command_queue command_queue,
+     cl_uint numEventsInWaitList,
+     const cl_event *eventWaitList,
+     cl_event *event);
+
+/**
+ * @brief Fills synchronously a matrix with a pattern of size \b element_size bytes
+ * 
+ * @param[in] order               specifies the matrix order
+ * @param[in] element_size        specifies the size of one element of A. Supported sizes correspond 
+ *                                to the element sizes used in clBLAS (1, 2, 4, 8 or 16)
+ * @param[in] A		          specifies the matrix on the device to fill
+ * @param[in] offA                specifies the offset of matrix A in elements
+ * @param[in] ldA                 specifies the leading dimension of A in elements
+ * @param[in] nrA                 specifies the number of rows in A
+ * @param[in] ncA                 specifies the number of columns in A
+ * @param[in] pattern             specifies the host address of the pattern to fill with (\b element_size bytes)
+ * @param[in] command_queue 	  specifies the OpenCL queue
+ * @param[in] numEventsInWaitList specifies the number of OpenCL events to wait for
+ * @param[in] eventWaitList 	  specifies the list of OpenCL events to wait for
+ * @return
+ *   - see \b clblasWriteSubMatrix()
+ */
+clblasStatus clblasFillMatrix(
+     clblasOrder order,
+     size_t element_size,
+     cl_mem A, size_t offA, size_t ldA,
+     size_t nrA, size_t ncA,
+     const void *pattern,
+     cl_command_queue command_queue,
+     cl_uint numEventsInWaitList,
+     const cl_event *eventWaitList);
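+
+/*
+ * Illustrative sketch (not part of the API): set every element of an M x N
+ * column-major device matrix "devA" with leading dimension lda (lda >= M) to
+ * zero. "M", "N", "lda", "devA" and "queue" are assumed to be defined by the
+ * caller.
+ *
+ *   cl_float zero = 0.0f;
+ *   clblasStatus st = clblasFillMatrix( clblasColumnMajor, sizeof(cl_float),
+ *                                       devA, 0, lda, M, N, &zero,
+ *                                       queue, 0, NULL );
+ */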
+
+
+/**
+ * @brief Fills synchronously a sub-matrix with a pattern of size \b element_size bytes
+ * 
+ * @param[in] order               specifies the matrix order
+ * @param[in] element_size        specifies the size of one element of A. Supported values 
+ *                                correspond to the element sizes used in clBLAS, that is 1, 2, 4, 8 or 16 
+ * @param[in] offA                specifies the offset of matrix A in elements
+ * @param[in] ldA                 specifies the leading dimension of A in elements
+ * @param[in] nrA		  specifies the number of rows of A 
+ *				  in elements
+ * @param[in] ncA		  specifies the number of columns of A 
+ *				  in elements
+ * @param[in] xA		  specifies the top-left x position of the 
+ * 				  block to fill in A
+ * @param[in] yA		  specifies the top-left y position of the 
+ * 				  block to fill in A
+ * @param[in] nx 		  specifies the number of elements to 
+ *				  fill along the x dimension (rows)
+ * @param[in] ny 		  specifies the number of elements to 
+ *				  fill along the y dimension 
+ *				  (columns)
+ * @param[in] pattern             specifies the host address of the pattern to fill with (\b element_size bytes)
+ * @param[in] command_queue 	  specifies the OpenCL queue
+ * @param[in] numEventsInWaitList specifies the number of OpenCL events to wait for
+ * @param[in] eventWaitList 	  specifies the list of OpenCL events to wait for
+ * @return
+ *   - see \b clblasWriteSubMatrix()
+ */
+
+clblasStatus clblasFillSubMatrix(
+     clblasOrder order,
+     size_t element_size,
+     cl_mem A, size_t offA, size_t ldA,
+     size_t nrA, size_t ncA,
+     size_t xA, size_t yA,
+     size_t nx, size_t ny,
+     const void *pattern,
+     cl_command_queue command_queue,
+     cl_uint numEventsInWaitList,
+     const cl_event *eventWaitList);
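+
+/*
+ * Illustrative sketch (not part of the API): zero a 16 x 8 block starting at
+ * position (4,2) of a 128 x 64 column-major device matrix "devA" with leading
+ * dimension 128. "devA" and "queue" are assumed to be defined by the caller.
+ *
+ *   cl_float zero = 0.0f;
+ *   clblasStatus st = clblasFillSubMatrix( clblasColumnMajor, sizeof(cl_float),
+ *                                          devA, 0, 128,   // matrix, offset, leading dimension
+ *                                          128, 64,        // full matrix size
+ *                                          4, 2,           // top-left corner of the block
+ *                                          16, 8,          // block size (nx x ny)
+ *                                          &zero,
+ *                                          queue, 0, NULL );
+ */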
+
+/**
+ * @brief Asynchronous asynchronously fill a sub-matrix with a pattern of a size element_size_bytes  
+ *	  See \b clblasFillSubMatrix().
+ */
+clblasStatus clblasFillSubMatrixAsync(
+     clblasOrder order,
+     size_t element_size,
+     cl_mem A, size_t offA, size_t ldA,
+     size_t sxA, size_t syA,
+     int xA, int yA,
+     size_t nx, size_t ny,
+     const void *host,
+     cl_command_queue command_queue,
+     cl_uint numEventsInWaitList,
+     const cl_event *eventWaitList,
+     cl_event *event);
 
 
 
diff --git a/src/client/clfunc_common.hpp b/src/client/clfunc_common.hpp
index bda1186..01363a9 100644
--- a/src/client/clfunc_common.hpp
+++ b/src/client/clfunc_common.hpp
@@ -302,6 +302,7 @@ public:
     virtual void call_func() = 0;
     virtual double gflops() = 0;
     virtual std::string gflops_formula() = 0;
+	virtual void setup_apiCallCount(cl_uint apiCallCount){}
     virtual void setup_buffer(int order_option, int side_option,
                               int uplo_option, int diag_option, int
                               transA_option, int transB_option,
diff --git a/src/client/clfunc_xgemm.hpp b/src/client/clfunc_xgemm.hpp
index fcd40a7..0b3f4f1 100644
--- a/src/client/clfunc_xgemm.hpp
+++ b/src/client/clfunc_xgemm.hpp
@@ -48,6 +48,7 @@ struct xGemmBuffer
     cl_mem buf_c_;
     T alpha_;
     T beta_;
+	cl_uint apiCallCount=1;
 }; // struct buffer
 
 template <typename T>
@@ -67,15 +68,19 @@ public:
     void call_func()
     {
 		timer.Start(timer_id);
-		xGemm_Function(true);
+		xGemm_Function(true, buffer_.apiCallCount);
 		timer.Stop(timer_id);
     }
 
     double gflops()
     {
-        return (2.0*buffer_.m_*buffer_.n_*buffer_.k_)/time_in_ns();
+		return (2.0*buffer_.m_*buffer_.n_*buffer_.k_) / (time_in_ns() / buffer_.apiCallCount);
     }
 
+	void setup_apiCallCount(cl_uint apiCallCount)
+	{
+		buffer_.apiCallCount = apiCallCount;
+	}
     std::string gflops_formula()
     {
         return "2.0*M*N*K/time";
@@ -976,7 +981,7 @@ protected:
 
 private:
     xGemmBuffer<T> buffer_;
-	void xGemm_Function(bool flush);
+	void xGemm_Function(bool flush, cl_uint apiCallCount = 1);
 
 
 }; // class xgemm
@@ -984,14 +989,17 @@ private:
 template<>
 void 
 xGemm<cl_float>::
-xGemm_Function(bool flush)
+xGemm_Function(bool flush, cl_uint apiCallCount )
 {
-	clblasSgemm(order_, buffer_.trans_a_, buffer_.trans_b_,
-                     buffer_.m_, buffer_.n_, buffer_.k_, buffer_.alpha_,
-                     buffer_.buf_a_, buffer_.offA_, buffer_.lda_,
-                     buffer_.buf_b_, buffer_.offB_, buffer_.ldb_,
-                     buffer_.beta_, buffer_.buf_c_, buffer_.offC_,
-                     buffer_.ldc_, 1, &queue_, 0, NULL, &event_);
+	for (int i = 0; i < apiCallCount; i++)
+	{
+		clblasSgemm(order_, buffer_.trans_a_, buffer_.trans_b_,
+			buffer_.m_, buffer_.n_, buffer_.k_, buffer_.alpha_,
+			buffer_.buf_a_, buffer_.offA_, buffer_.lda_,
+			buffer_.buf_b_, buffer_.offB_, buffer_.ldb_,
+			buffer_.beta_, buffer_.buf_c_, buffer_.offC_,
+			buffer_.ldc_, 1, &queue_, 0, NULL, &event_);
+	}
 	//flush==true if only the kernel time (library call) is timed
 	//flush==false if memory time is also timed
 	if (flush==true)
@@ -1003,7 +1011,7 @@ xGemm_Function(bool flush)
 template<>
 void 
 xGemm<cl_double>::
-xGemm_Function(bool flush)
+xGemm_Function(bool flush, cl_uint apiCallCount )
 {
 	clblasDgemm(order_, buffer_.trans_a_, buffer_.trans_b_,
                      buffer_.m_, buffer_.n_, buffer_.k_, buffer_.alpha_,
@@ -1022,7 +1030,7 @@ xGemm_Function(bool flush)
 template<>
 void 
 xGemm<cl_float2>::
-xGemm_Function(bool flush)
+xGemm_Function(bool flush, cl_uint apiCallCount )
 {
 	clblasCgemm(order_, buffer_.trans_a_, buffer_.trans_b_,
                      buffer_.m_, buffer_.n_, buffer_.k_, buffer_.alpha_,
@@ -1041,7 +1049,7 @@ xGemm_Function(bool flush)
 template<>
 void 
 xGemm<cl_double2>::
-xGemm_Function(bool flush)
+xGemm_Function(bool flush, cl_uint apiCallCount )
 {
 	clblasZgemm(order_, buffer_.trans_a_, buffer_.trans_b_,
                      buffer_.m_, buffer_.n_, buffer_.k_, buffer_.alpha_,
diff --git a/src/client/client.cpp b/src/client/client.cpp
index 1618609..f3d3a77 100644
--- a/src/client/client.cpp
+++ b/src/client/client.cpp
@@ -52,6 +52,7 @@ int main(int argc, char *argv[])
   cl_double alpha;
   cl_double beta;
   cl_uint profileCount;
+  cl_uint apiCallCount;
   cl_uint commandQueueFlags = 0;
   cl_device_type deviceType = CL_DEVICE_TYPE_GPU;
   int order_option;
@@ -100,7 +101,8 @@ int main(int argc, char *argv[])
     ( "side", po::value<int>( &side_option )->default_value(0), "0 = left, 1 = right. only used with [list of function families]" ) // xtrsm xtrmm
     ( "uplo", po::value<int>( &uplo_option )->default_value(0), "0 = upper, 1 = lower. only used with [list of function families]" )  // xsymv xsyrk xsyr2k xtrsm xtrmm
     ( "diag", po::value<int>( &diag_option )->default_value(0), "0 = unit diagonal, 1 = non unit diagonal. only used with [list of function families]" ) // xtrsm xtrmm
-    ( "profile,p", po::value<cl_uint>( &profileCount )->default_value(20), "Time and report the kernel speed (default: profiling off)" )
+    ( "profile,p", po::value<cl_uint>( &profileCount )->default_value(20), "Time and report the kernel speed (default: 20)" )
+	( "apiCallCount", po::value<cl_uint>(&apiCallCount)->default_value(10), "Time and report the kernel speed on counds of API calls (default: 10)")
 	( "roundtrip", po::value<std::string>( &roundtrip )->default_value("noroundtrip"),"including the time of OpenCL memory allocation and transportation; options:roundtrip, noroundtrip(default)")
 	( "memalloc", po::value<std::string>( &memalloc )->default_value("default"),"setting the memory allocation flags for OpenCL; would not take effect if roundtrip time is not measured; options:default(default),alloc_host_ptr,use_host_ptr,copy_host_ptr,use_persistent_mem_amd,rect_mem")
     ;
@@ -489,7 +491,7 @@ int main(int argc, char *argv[])
       my_function->initialize_cpu_buffer();
       my_function->initialize_gpu_buffer();
 
-      my_function->call_func(); // do a calculation first to get any compilation out of the way
+	  my_function->call_func(); // do a calculation first to get any compilation out of the way
       my_function->reset_gpu_write_buffer(); // reset GPU write buffer
   }
   catch( std::exception& exc )
@@ -554,9 +556,7 @@ int main(int argc, char *argv[])
   }
   if(roundtrip=="noroundtrip"||roundtrip=="both")
   {
-  timer.Reset();
-  for( cl_uint i = 0; i < profileCount; ++i )
-  {
+    timer.Reset();
     my_function->setup_buffer( order_option, side_option, uplo_option,
                                  diag_option, transA_option, transB_option,
                                    M, N, K, lda, ldb, ldc, offA, offBX, offCY,
@@ -565,17 +565,20 @@ int main(int argc, char *argv[])
 
     my_function->initialize_cpu_buffer();
     my_function->initialize_gpu_buffer();
-    my_function->call_func();
+	my_function->setup_apiCallCount( apiCallCount );
+	for (cl_uint i = 0; i < profileCount; ++i)
+    {
+		my_function->call_func();
+	}
 	my_function->read_gpu_buffer();
     //my_function->reset_gpu_write_buffer();
 	my_function->releaseGPUBuffer_deleteCPUBuffer();
-  }
 
   if( commandQueueFlags & CL_QUEUE_PROFILING_ENABLE )
   {
     //std::cout << timer << std::endl;
     timer.pruneOutliers( 3.0 );
-    std::cout << "BLAS kernel execution time < ns >: " << my_function->time_in_ns() << std::endl;
+    std::cout << "BLAS kernel execution time < ns >: " << my_function->time_in_ns() / apiCallCount << std::endl;
     std::cout << "BLAS kernel execution Gflops < " <<
       my_function->gflops_formula() << " >: " << my_function->gflops() <<
       std::endl;
diff --git a/src/flags_public.txt b/src/flags_public.txt
new file mode 100644
index 0000000..e3fcd75
--- /dev/null
+++ b/src/flags_public.txt
@@ -0,0 +1,4 @@
+TAHITI_OCL " ";
+HAWAII1_OCL " ";
+HAWAII2_OCL "-cl-std=CL2.0";
+BONAIRE_OCL " ";
diff --git a/src/include/binary_lookup.h b/src/include/binary_lookup.h
new file mode 100644
index 0000000..3a53e60
--- /dev/null
+++ b/src/include/binary_lookup.h
@@ -0,0 +1,273 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef __CLBLAS_BINARY_LOOKUP__
+#define __CLBLAS_BINARY_LOOKUP__
+
+#if defined(__APPLE__) || defined(__MACOSX)
+#include <OpenCL/cl.h>
+#else
+#include <CL/cl.h>
+#endif
+
+#include <string>
+#include <vector>
+
+//
+// BinaryLookup defines an API to manage the kernel cache on the disk
+//
+// The BinaryLookup object provides methods to:
+//  * check if a cache file exists on the disk or not
+//  * fill up the signature that characterizes the program being built and cached on disk
+//  * build a cl_program from a string kernel or from a binary
+// 
+// A cache entry is a file stored on the disk which contains 3 sections:
+//  * A header section (providing information about file structure)
+//  * The binary contained in the cl_program
+//  * A signature which provides additional information about the kernel
+//    and allows the kernel to be identified in the disk cache
+//
+// The environment variable CLBLAS_CACHE_PATH defines the location of the
+// cache on the disk. If the variable CLBLAS_CACHE_PATH is not defined, no
+// cache file is written to disk, but the cl_program can still be built and
+// remains in memory
+//
+// Concerning multithreading, the policy is that every thread builds the
+// cl_program from the source, but only the first one writes it to the
+// disk. Other threads continue with the cl_program in memory.
+//
+// A typical cache query shall be composed of the following steps:
+//
+//  (1) Create a local instance of BinaryLookup 
+//
+//  (2) Specify the additional characteristics (i.e. variants) of the
+//      requested program. That information, combined with the program
+//      name and the OpenCL context and device, shall form a unique
+//      signature for the binary program.
+// 
+//  (3) Perform the effective search by calling the 'found' method
+// 
+//  (4) if the search was successful then the cl_program can be retrieved 
+//      by a call to the 'getProgram' method
+// 
+//  (5) if the search was not successful then a cl_program 
+//      must be created and populated in the cache by a call 
+//      to the 'setProgram' method.
+// 
+//  (6) Destroy the BinaryLookup local instance.
+// 
+// For instance, that could be 
+//
+//     cl_program program  ;
+//   
+//     The program name is part of the signature and shall be unique 
+//     const char * program_name = "... my unique program name ... " ;
+//   
+//     BinaryLookup bl(context, device, program_name);
+//   
+//     //  Specify additional information used to build a
+//     //  signature for that cache entry 
+//                  
+//     bl.variantInt( vectorSize );
+//     bl.variantInt( hasBorder );
+//     ... 
+//   
+//     // Perform the query 
+//     if ( bl.found() ) 
+//     {
+//        //   Success! use the cl_program retrieved from the cache
+//        program = bl.getProgram();
+//     }
+//     else 
+//     {
+//        //   Failure! we need to build the program ourselves
+//        program = build_the_program(context,device,vectorSize,...) ; 
+//        //   Inform the lookup object of the program
+//        bl.setProgram(program);  
+//        //   And populate the cache
+//        bl.populateCache();
+//     }
+// 
+// Remark: The members buildFromSource, buildFromBinary etc. are utility 
+//         functions that can be used to build the cl_program from either 
+//         sources or a binary (e.g. SPIR). Their use is optional. 
+//
+//
+class BinaryLookup
+ {
+public:
+    // Constructor
+    // \param ctxt the context for which the cl_program should be built
+    // \param device the device for which the cl_program should be built
+    // \param kernel_name the kernel identifier
+    BinaryLookup(cl_context ctxt, cl_device_id device, const std::string & kernel_name);
+    ~BinaryLookup();
+
+    // Methods to fill up the signature of the cache entry
+    void variantInt(int num);
+    void variantDouble(double num);
+    void variantCompileOptions(const std::string & opts);
+    void variantRaw(const void * data, size_t bytes);
+
+    // Indicates whether or not the cache entry was found on the disk
+    // If the cache entry was found and is complete on the disk, its content
+    // is loaded
+    // \return true if a cache entry was found, false otherwise
+    bool found();
+
+    // Build a cl_program from the source code and init attributes
+    // of the current structure
+    // so that the program can be accessed with the getProgram method
+    // Write the file to the cache
+    cl_int buildFromSource(const char * source);
+
+    // Build a cl_program from the source code and init attributes
+    // so that the program can be accessed with the getProgram method
+    // Write the file to the cache
+    cl_int buildFromBinary(const void * data, 
+                           size_t len,
+                           const char * BuildOption);
+
+    // Returns the cl_program built from binary or loaded from disk
+    cl_program getProgram();
+
+    // Set the current m_program to the given program
+    void setProgram(cl_program program);
+
+    // Build a cl_program from a text
+    static cl_program buildProgramFromSource(const char * filename,
+                                             cl_context context,
+                                             cl_device_id device,
+                                             cl_int & err,
+                                             const char * options = 0);
+
+    // Build a cl_program from binary
+    static cl_program buildProgramFromBinary(const char * data,
+                                             size_t data_size,
+                                             cl_context context,
+                                             cl_device_id device,
+                                             cl_int & err,
+                                             const char * options = 0);
+
+    // Initialize the whole cache file information (magic_key, header and program)
+    // and dump on the disk
+    cl_int populateCache();
+
+private:
+
+    // Serialize variants and compute the checksum to load the file from cache
+    void finalizeVariant();
+
+    // Build a cl_program from the source code and init attributes
+    // so that the program can be accessed with the getProgram method
+    // Do not write the file to the cache
+    cl_int buildFromLoadedBinary(const void * data, 
+                                 size_t len,
+                                 const char * BuildOption);
+
+    // Try to retrieve the header of the cache file
+    // Returns: true if the header section was successfully loaded, false otherwise
+    bool loadHeader(std::ifstream &file, size_t length);
+
+    // Try to retrieve the cl_program and its signature in file
+    // Returns: true if the binary and signature sections were successfully loaded, false otherwise
+    bool loadBinaryAndSignature(std::ifstream &file);
+
+    // Try to create a file associated to the current program/variant in the cache folder
+    // Returns true if the file was successfully opened and loaded, false otherwise
+    bool tryLoadCacheFile();
+
+    // Dump the file on the disk with the name stored in this->m_cache_entry_name
+    cl_int writeCacheFile(std::vector<unsigned char*> &data);
+
+    // Retrieve device name, device vendor and driver number by calling
+    // clGetDeviceInfo
+    cl_int retrieveDeviceAndDriverInfo();
+
+    // Cache entry name 
+    std::string m_cache_entry_name;
+
+    // Path for the cache entry name
+    std::string m_path;
+
+    // Header structure of a cache entry
+    typedef struct Header_
+    {
+        char magic_key[4]; // = |C|L|B|\0, useful to know that we are loading a clblas cache entry
+        int whole_file_size; // the size of the whole file, used to check whether the current file is complete
+        int header_size; // = sizeof(Header)
+        int binary_size; // kernel binary size
+        int signature_size; // variant information
+    } Header;
+
+    Header m_header;
+
+    cl_context   m_context;
+    cl_device_id m_device;
+
+    cl_program   m_program;
+
+    unsigned char * m_binary;
+    char *          m_signature;
+
+    enum VariantKind
+    {
+        INT,
+        DOUBLE,
+        STRING,
+        DATA
+    };
+
+    struct Variant
+    {
+        Variant();
+        Variant(VariantKind kind, char * data, size_t size);
+
+        ~Variant();
+
+        VariantKind m_kind;
+        size_t      m_size;
+        char *      m_data;
+        
+        static char * serialize(VariantKind kind, char * data, size_t size);
+        static Variant unserialize(char * data);
+    };
+
+    // Cache entry, useful to abstract the Windows and Linux 
+    // cache entry file descriptor
+    struct CacheEntry
+    {
+        CacheEntry(const std::string & filename);
+        bool exclusive_create();
+        void close();
+        bool successful_creation();
+
+    private:
+        std::string m_filename;
+        bool        m_successful_creation;
+        void *      m_handle;
+    };
+
+    // Variants
+    std::vector<Variant> m_variants;
+
+    // Indicates whether the cache should be used or not
+    bool m_cache_enabled;
+};
+
+#undef SIZE
+
+#endif
diff --git a/src/include/devinfo.h b/src/include/devinfo.h
index ef179e3..e2366ea 100644
--- a/src/include/devinfo.h
+++ b/src/include/devinfo.h
@@ -49,6 +49,8 @@ typedef enum DeviceChip {
     HEMLOCK,
     CAYMAN,
     TAHITI,
+    HAWAII,
+    BONAIRE,
     GEFORCE_GTX_480,
     GEFORCE_GTX_580,
     NUM_DEVICE_CHIPS
diff --git a/src/include/md5sum.h b/src/include/md5sum.h
new file mode 100644
index 0000000..4ef7b43
--- /dev/null
+++ b/src/include/md5sum.h
@@ -0,0 +1,50 @@
+/*
+ * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
+ * MD5 Message-Digest Algorithm (RFC 1321).
+ *
+ * Homepage:
+ * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
+ *
+ * Author:
+ * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
+ *
+ * This software was written by Alexander Peslyak in 2001.  No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * There's ABSOLUTELY NO WARRANTY, express or implied.
+ *
+ * See md5.c for more information.
+ */
+
+#ifdef HAVE_OPENSSL
+#include <openssl/md5.h>
+#elif !defined(_MD5_H)
+#define _MD5_H
+
+/* Any 32-bit or wider unsigned integer data type will do */
+typedef unsigned int MD5_u32plus;
+
+typedef struct {
+	MD5_u32plus lo, hi;
+	MD5_u32plus a, b, c, d;
+	unsigned char buffer[64];
+	MD5_u32plus block[16];
+} MD5_CTX;
+
+extern void MD5_Init(MD5_CTX *ctx);
+extern void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size);
+extern void MD5_Final(unsigned char *result, MD5_CTX *ctx);
+
+char * md5sum (const void * data, unsigned long size);
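+
+/*
+ * Illustrative sketch (not part of this header): compute the MD5 digest of a
+ * memory block "data" of "size" bytes with the incremental API declared above.
+ *
+ *   unsigned char digest[16];
+ *   MD5_CTX ctx;
+ *   MD5_Init(&ctx);
+ *   MD5_Update(&ctx, data, size);
+ *   MD5_Final(digest, &ctx);
+ */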
+
+
+
+#endif
+
diff --git a/src/include/rwlock.h b/src/include/rwlock.h
new file mode 100644
index 0000000..b63c54d
--- /dev/null
+++ b/src/include/rwlock.h
@@ -0,0 +1,117 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#ifndef RWLOCK_H_
+#define RWLOCK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The rwlock functions provide an abstract implementation of a 
+ * readers-writer lock (also called a shared/exclusive lock).
+ *
+ * Simply speaking, that kind of lock allows either 
+ *  - multiple concurrent read accesses to a data structure, or
+ *  - a single write access that excludes any other read or write access. 
+ *
+ * A read region shall start with a call to rwlockReadLock() and end with
+ * rwlockReadUnlock()
+ *  
+ * A write region shall start with a call to rwlockWriteLock() and end with
+ * rwlockWriteUnlock()
+ *
+ * Of course, two consecutive calls to either rwlockReadLock() or
+ * rwlockWriteLock() from a single thread will cause a deadlock.
+ *
+ *
+ * Example: Access to a protected counter
+ *
+ *
+ *   int counter = 0 ;
+ *   rwlock_t *lock = rwlockInit() ; 
+ *
+ *   int getCounter() 
+ *   {
+ *      int v ; 
+ *      rwlockReadLock(lock) ; 
+ *      v = counter ; 
+ *      rwlockReadUnlock(lock) ;
+ *      return v ; 
+ *   } 
+ *   
+ *   int preIncrementCounter() 
+ *   {
+ *      int v ; 
+ *      rwlockWriteLock(lock) ; 
+ *      v = ++counter ; 
+ *      rwlockWriteUnlock(lock) ;
+ *      return v; 
+ *   } 
+ *   
+ *   
+ *
+ */
+
+
+
+typedef void* rwlock_t;
+
+/* 
+ * Create and initialize a new readers-writer lock
+ *
+ */ 
+rwlock_t* rwlockInit(void);
+
+
+/* 
+ * Destroy a readers-writer lock previously created by rwlockInit() 
+ *
+ */ 
+int rwlockDestroy(rwlock_t *rwlock);
+
+/* 
+ * Enter a (shared) read region
+ *
+ */
+int rwlockReadLock(rwlock_t *rwlock );
+
+/* 
+ * Enter an (exclusive) write region
+ *
+ */
+int rwlockWriteLock(rwlock_t *rwlock );
+
+/* 
+ * Leave a read region
+ *
+ */
+int rwlockReadUnlock(rwlock_t *rwlock );
+
+/* 
+ * Leave a write region
+ *
+ */
+int rwlockWriteUnlock(rwlock_t *rwlock );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* RWLOCK_H_ */
diff --git a/src/library/CMakeLists.txt b/src/library/CMakeLists.txt
index d89e7a7..91e57f7 100644
--- a/src/library/CMakeLists.txt
+++ b/src/library/CMakeLists.txt
@@ -20,42 +20,64 @@ set(SRC_BLAS
     blas/scimage.c
     blas/xgemv.c
     blas/xsymv.c
-    blas/xgemm.c
+    blas/xgemm.cc
     blas/xtrmm.c
-    blas/xtrsm.c
+    blas/xtrsm.cc
     blas/xsyrk.c
     blas/xsyr2k.c
     blas/xtrmv.c
     blas/xtrsv.c
     blas/xsymm.c
-	blas/xgemm2.c
+    blas/xgemm2.c
     blas/xger.c
-	blas/xsyr.c
-	blas/xsyr2.c
-	blas/xher.c
-	blas/xher2.c
-	blas/xhemv.c
-	blas/xhemm.c
-	blas/xherk.c
-	blas/xhpmv.c
-	blas/xspmv.c
-	blas/xgbmv.c
-	blas/xtbmv.c
-	blas/xshbmv.c
-	blas/xtbsv.c
-	blas/xher2k.c
-	blas/xswap.c
-	blas/xscal.c
-	blas/xcopy.c
-	blas/xaxpy.c
-	blas/xdot.c
-	blas/xrotg.c
-	blas/xrotmg.c
-	blas/xrot.c
-	blas/xrotm.c
+    blas/xsyr.c
+    blas/xsyr2.c
+    blas/xher.c
+    blas/xher2.c
+    blas/xhemv.c
+    blas/xhemm.c
+    blas/xherk.c
+    blas/xhpmv.c
+    blas/xspmv.c
+    blas/xgbmv.c
+    blas/xtbmv.c
+    blas/xshbmv.c
+    blas/xtbsv.c
+    blas/xher2k.c
+    blas/xswap.c
+    blas/xscal.cc
+    blas/xcopy.c
+    blas/xaxpy.c
+    blas/xdot.c
+    blas/xrotg.c
+    blas/xrotmg.c
+    blas/xrot.c
+    blas/xrotm.c
     blas/ixamax.c
-	blas/xnrm2.c
+    blas/xnrm2.c
     blas/xasum.c
+    blas/matrix.c
+    blas/fill.cc
+    blas/functor/functor.cc
+    blas/functor/functor_selector.cc
+    blas/functor/functor_xgemm.cc
+    blas/functor/functor_xscal.cc
+    blas/functor/functor_xtrsm.cc
+    blas/functor/functor_xscal_generic.cc
+    blas/functor/tahiti.cc
+    blas/functor/hawaii.cc
+    blas/functor/bonaire.cc
+    blas/functor/gcn_dgemm.cc
+    blas/functor/gpu_dtrsm.cc
+    blas/functor/functor_fill.cc
+    blas/functor/hawaii_dgemmChannelConflict.cc
+    blas/functor/hawaii_dgemmSplitKernel.cc
+    blas/functor/hawaii_sgemmSplitKernel.cc
+    blas/functor/gcn_dgemmCommon.cc
+    blas/functor/gcn_sgemm.cc
+    blas/functor/gcn_dgemmSmallMatrices.cc
+    blas/functor/gcn_sgemmSmallMatrices.cc
+    blas/functor/hawaii_sgemmBranchKernel.cc
 )
 
 set(SRC_BLAS_HEADERS
@@ -66,10 +88,31 @@ set(SRC_BLAS_HEADERS
     blas/include/clblas-internal.h
     blas/include/solution_seq.h
     blas/include/events.h
+    blas/functor/include/functor.h
+    blas/functor/include/functor_xgemm.h
+    blas/functor/include/functor_xscal.h
+    blas/functor/include/functor_xtrsm.h
+    blas/functor/include/functor_xscal_generic.h
+    blas/functor/include/functor_selector.h
+    blas/functor/include/tahiti.h
+    blas/functor/include/hawaii.h
+    blas/functor/include/bonaire.h
+    blas/functor/include/gcn_dgemm.h
+    blas/functor/include/gpu_dtrsm.h
+    blas/functor/include/BinaryBuild.h
+    blas/functor/include/hawaii_dgemmChannelConflict.h
+    blas/functor/include/hawaii_dgemmSplitKernel.h
+    blas/functor/include/hawaii_sgemmSplitKernel.h
+    blas/functor/include/gcn_dgemmCommon.h
+    blas/functor/include/gcn_sgemm.h
+    blas/functor/include/gcn_dgemmSmallMatrices.h
+    blas/functor/include/gcn_sgemmSmallMatrices.h
+    blas/functor/include/hawaii_sgemmBranchKernel.h
 )
 
 set(SRC_BLAS_GENERIC
     blas/generic/common.c
+    blas/generic/common2.cc
     blas/generic/blas_funcs.c
     blas/generic/events.c
     blas/generic/matrix_props.c
@@ -79,12 +122,15 @@ set(SRC_BLAS_GENERIC
     blas/generic/solution_seq.c
     blas/generic/solution_seq_make.c
     blas/generic/problem_iter.c
-    blas/generic/kernel_extra.c)
+    blas/generic/kernel_extra.c
+    blas/generic/binary_lookup.cc
+    blas/generic/functor_cache.cc
+)
 
 set(SRC_BLAS_GENS
     blas/gens/gen_init.c
     blas/gens/blas_kgen.c
-	blas/gens/blas_subgroup.c
+    blas/gens/blas_subgroup.c
     blas/gens/gen_helper.c
     blas/gens/tilemul.c
     blas/gens/fetch.c
@@ -112,22 +158,21 @@ set(SRC_BLAS_GENS
     blas/gens/legacy/trsm_lds.c
     blas/gens/legacy/trsm_img.c
     blas/gens/legacy/trsm_cached_lds.c
-
-	blas/gens/trmv_reg.cpp
-	blas/gens/ger_lds.cpp
-	blas/gens/trsv_trtri.cpp
-	blas/gens/trsv_gemv.cpp
-	blas/gens/kprintf.cpp
-	blas/gens/syr_lds.cpp
-	blas/gens/her_lds.cpp
-	blas/gens/syr2_lds.cpp
-	blas/gens/her2_lds.cpp
-	blas/gens/symm_cached.cpp
-	blas/gens/gemm_cached.cpp
-	blas/gens/gemm_tail_cached.cpp
-	blas/gens/gbmv.cpp
-	blas/gens/tuned_numbers.c
-	blas/gens/swap_reg.cpp
+    blas/gens/trmv_reg.cpp
+    blas/gens/ger_lds.cpp
+    blas/gens/trsv_trtri.cpp
+    blas/gens/trsv_gemv.cpp
+    blas/gens/kprintf.cpp
+    blas/gens/syr_lds.cpp
+    blas/gens/her_lds.cpp
+    blas/gens/syr2_lds.cpp
+    blas/gens/her2_lds.cpp
+    blas/gens/symm_cached.cpp
+    blas/gens/gemm_cached.cpp
+    blas/gens/gemm_tail_cached.cpp
+    blas/gens/gbmv.cpp
+    blas/gens/tuned_numbers.c
+    blas/gens/swap_reg.cpp
     blas/gens/scal_reg.cpp
     blas/gens/copy_reg.cpp
     blas/gens/axpy_reg.cpp
@@ -141,6 +186,11 @@ set(SRC_BLAS_GENS
     blas/gens/asum.cpp
 )
 
+#set (BIN_CL_TEMPLATES
+#dgemm_hawai.cl
+#dtrsm_gpu.cl
+#)
+
 set (SRC_CL_TEMPLATES
     gemm.cl
     gemm_helper.cl
@@ -169,6 +219,35 @@ set (SRC_CL_TEMPLATES
     iamax.cl
     nrm2.cl
     asum.cl
+    custom_gemm.cl
+    dgemm_hawai.cl
+    dgemm_hawaiiChannelConfilct.cl
+    dgemm_hawaiiSplitKernel.cl
+    sgemm_hawaiiSplitKernel.cl
+    dtrsm_gpu.cl
+
+    dgemm_gcn_SmallMatrices.cl
+    sgemm_gcn_SmallMatrices.cl
+    sgemm_gcn.cl
+)
+
+set(SRC_CL_TEMPLATES_GEN
+    dgemm_hawai.clHawaii_64.bin.cl
+    dtrsm_gpu.clHawaii_64.bin.cl
+    dgemm_hawaiiChannelConfilct.clHawaii_64.bin.cl
+    dgemm_hawaiiSplitKernel.clHawaii_64.bin.cl
+    sgemm_hawaiiSplitKernel.clHawaii_64.bin.cl
+    sgemm_hawaiiSplitKernel.clBonaire_64.bin.cl
+    dgemm_hawai.clTahiti_64.bin.cl
+    dtrsm_gpu.clTahiti_64.bin.cl
+    dgemm_gcn_SmallMatrices.clHawaii_64.bin.cl
+    dgemm_gcn_SmallMatrices.clTahiti_64.bin.cl
+    sgemm_gcn_SmallMatrices.clHawaii_64.bin.cl
+    sgemm_gcn_SmallMatrices.clTahiti_64.bin.cl
+    sgemm_gcn_SmallMatrices.clBonaire_64.bin.cl
+    sgemm_gcn.clHawaii_64.bin.cl
+    sgemm_gcn.clBonaire_64.bin.cl
+    sgemm_gcn.clTahiti_64.bin.cl
 )
 
 set(SRC_BLAS_GENERIC_HEADERS
@@ -179,7 +258,7 @@ set(SRC_BLAS_GENERIC_HEADERS
 set(SRC_BLAS_GENS_HEADERS
     blas/gens/fetch.h
     blas/gens/blas_kgen.h
-	blas/gens/blas_subgroup.h
+    blas/gens/blas_subgroup.h
     blas/gens/gen_helper.h
     blas/gens/init.h
     blas/gens/trxm_common.h
@@ -202,7 +281,9 @@ set(SRC_COMMON
     common/devinfo.c
     common/devinfo-cache.c
     common/mutex.c
+    common/rwlock.c
     common/trace_malloc.c
+    common/md5sum.c
 )
 
 set(SRC_COMMON_GENS
@@ -238,7 +319,10 @@ set(GLOBAL_HEADERS
     ${clBLAS_SOURCE_DIR}/include/mempat.h
     ${clBLAS_SOURCE_DIR}/include/msvc.h
     ${clBLAS_SOURCE_DIR}/include/mutex.h
+    ${clBLAS_SOURCE_DIR}/include/rwlock.h
     ${clBLAS_SOURCE_DIR}/include/solver.h
+    ${clBLAS_SOURCE_DIR}/include/md5sum.h
+    ${clBLAS_SOURCE_DIR}/include/binary_lookup.h
 )
 
 source_group(common FILES ${SRC_COMMON})
@@ -253,6 +337,7 @@ include_directories(${OPENCL_INCLUDE_DIRS}
     ${clBLAS_SOURCE_DIR}
     ${clBLAS_SOURCE_DIR}/include
     ${clBLAS_SOURCE_DIR}/library/blas/include
+    ${clBLAS_SOURCE_DIR}/library/blas/functor/include
     ${clBLAS_SOURCE_DIR}/library/tools/tune
     ${clBLAS_BINARY_DIR}/include
 )
@@ -279,10 +364,81 @@ endif()
 
 include( ExternalProject )
 ExternalProject_Add( tplgen
-    URL "${PROJECT_SOURCE_DIR}/library/tools/tplgen"
+    URL "${CMAKE_SOURCE_DIR}/library/tools/tplgen"
     INSTALL_COMMAND ""
 )
 
+ExternalProject_Add( bingen
+    URL "${CMAKE_SOURCE_DIR}/library/tools/bingen"
+    CMAKE_ARGS -DOPENCL_LIBRARIES=${OPENCL_LIBRARIES} -DOPENCL_INCLUDE_DIRS=${OPENCL_INCLUDE_DIRS}
+    INSTALL_COMMAND ""
+)
+
+message(STATUS "OCL_VERSION = ${OCL_VERSION}")
+if( OCL_VERSION STREQUAL "2.0")
+	if(EXISTS ${CMAKE_SOURCE_DIR}/flags.txt)
+		MESSAGE(STATUS "flags.txt found. will load AMD_OCL_BUILD_OPTIONS_APPEND from it.")
+		set (LOAD_CL_FLAGS TRUE)
+		file (STRINGS "${CMAKE_SOURCE_DIR}/flags.txt" OCL_FLAGS)
+		MESSAGE(STATUS "OCLFLAGS: ${OCL_FLAGS}")
+		string(REPLACE "OCL " "OCL;" OCL_FLAGS_REPLACED ${OCL_FLAGS})
+		list(GET OCL_FLAGS_REPLACED 1 OCL_FLAGS_REPLACED_1)#flags for TAHITI
+		list(GET OCL_FLAGS_REPLACED 3 OCL_FLAGS_REPLACED_3)#flags for HAWAII 1
+		list(GET OCL_FLAGS_REPLACED 5 OCL_FLAGS_REPLACED_5)#flags for HAWAII 2
+		list(GET OCL_FLAGS_REPLACED 7 OCL_FLAGS_REPLACED_7)#flags for BONAIRE
+		#MESSAGE("${OCL_FLAGS_REPLACED_7}")
+	elseif(EXISTS ${CMAKE_SOURCE_DIR}/flags_public.txt)
+		MESSAGE(STATUS "flags_public.txt found. will load AMD_OCL_BUILD_OPTIONS_APPEND from it.")
+		set (LOAD_CL_FLAGS TRUE)
+		file (STRINGS "${CMAKE_SOURCE_DIR}/flags_public.txt" OCL_FLAGS)
+		MESSAGE(STATUS "OCLFLAGS: ${OCL_FLAGS}")
+		string(REPLACE "OCL " "OCL;" OCL_FLAGS_REPLACED ${OCL_FLAGS})
+		list(GET OCL_FLAGS_REPLACED 1 OCL_FLAGS_REPLACED_1)#flags for TAHITI
+		list(GET OCL_FLAGS_REPLACED 3 OCL_FLAGS_REPLACED_3)#flags for HAWAII 1
+		list(GET OCL_FLAGS_REPLACED 5 OCL_FLAGS_REPLACED_5)#flags for HAWAII 2
+		list(GET OCL_FLAGS_REPLACED 7 OCL_FLAGS_REPLACED_7)#flags for BONAIRE	
+	else()
+		MESSAGE(STATUS "flags.txt not found. will use the default flags.")
+		set (LOAD_CL_FLAGS FALSE)
+	endif()
+else()
+	MESSAGE(STATUS "loading of compiler flags requires OpenCL 2.0. will use default flags.")
+	set (LOAD_CL_FLAGS FALSE)
+endif()
+
+#set( bingenBinaryDir "${CMAKE_BINARY_DIR}/library/tools/bingen/staging" )
+ExternalProject_Get_Property( bingen binary_dir )
+set( bingenBinaryDir "" )
+if( CMAKE_COMPILER_IS_GNUCXX )
+    set( bingenBinaryDir "${binary_dir}/staging" )
+else()
+    set( bingenBinaryDir "${binary_dir}/staging" )
+#    set( bingenBinaryDir "${binary_dir}/${CMAKE_CFG_INTDIR}" )
+endif()
+
+if (LOAD_CL_FLAGS)
+add_custom_target( GEN_CLBIN )
+add_custom_command(TARGET GEN_CLBIN
+                   PRE_BUILD
+				   COMMAND ${CMAKE_COMMAND} -DbingenBinaryDir=${bingenBinaryDir} -DCLTEMPLATE_PATH="${CMAKE_SOURCE_DIR}/library/blas/gens/clTemplates"  
+	               -DLOAD_CL_FLAGS=${LOAD_CL_FLAGS} -DTAHITI_FLAG=${OCL_FLAGS_REPLACED_1} -DHAWAII1_FLAG=${OCL_FLAGS_REPLACED_3} -DHAWAII2_FLAG=${OCL_FLAGS_REPLACED_5} -DBONAIRE_FLAG=${OCL_FLAGS_REPLACED_7} 
+				   -DENV_PATH=${ENV_PATH} -DOCL_OFFLINE_BUILD_HAWAII_KERNEL=${OCL_OFFLINE_BUILD_HAWAII_KERNEL} -DOCL_OFFLINE_BUILD_BONAIRE_KERNEL=${OCL_OFFLINE_BUILD_BONAIRE_KERNEL} 
+				   -DOCL_OFFLINE_BUILD_TAHITI_KERNEL=${OCL_OFFLINE_BUILD_TAHITI_KERNEL}
+				   -P "${CMAKE_SOURCE_DIR}/library/bingen.cmake"
+				   )	  
+add_dependencies( GEN_CLBIN bingen )
+else()
+add_custom_target( GEN_CLBIN )
+add_custom_command(TARGET GEN_CLBIN
+                   PRE_BUILD
+				   COMMAND ${CMAKE_COMMAND} -DbingenBinaryDir=${bingenBinaryDir} -DCLTEMPLATE_PATH="${CMAKE_SOURCE_DIR}/library/blas/gens/clTemplates" 
+				   -DOCL_OFFLINE_BUILD_HAWAII_KERNEL=${OCL_OFFLINE_BUILD_HAWAII_KERNEL} -DOCL_OFFLINE_BUILD_BONAIRE_KERNEL=${OCL_OFFLINE_BUILD_BONAIRE_KERNEL} 
+				   -DOCL_OFFLINE_BUILD_TAHITI_KERNEL=${OCL_OFFLINE_BUILD_TAHITI_KERNEL}
+				   -P "${CMAKE_SOURCE_DIR}/library/bingen.cmake"
+				   )
+add_dependencies( GEN_CLBIN bingen )
+endif()
+
 ExternalProject_Get_Property( tplgen binary_dir )
 
 set( tplgenBinaryDir "" )
@@ -293,10 +449,13 @@ else()
 endif()
 
 add_custom_target( GENERATE_CLT
-    COMMAND ${tplgenBinaryDir}/tplgen -o ${clBLAS_BINARY_DIR}/include ${SRC_CL_TEMPLATES}
-    WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/library/blas/gens/clTemplates
+    COMMAND ${tplgenBinaryDir}/tplgen -o ${clBLAS_BINARY_DIR}/include -i ${CMAKE_SOURCE_DIR}/library/blas/gens/clTemplates/ ${SRC_CL_TEMPLATES}
+    COMMAND ${tplgenBinaryDir}/tplgen -o ${clBLAS_BINARY_DIR}/include -i ${bingenBinaryDir}/ ${SRC_CL_TEMPLATES_GEN}
+    #WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/library/blas/gens/clTemplates
+	WORKING_DIRECTORY ${bingenBinaryDir}
 )
 
+add_dependencies( tplgen GEN_CLBIN )
 add_dependencies( GENERATE_CLT tplgen )
 
 if( CMAKE_COMPILER_IS_GNUCC )
@@ -316,10 +475,10 @@ target_link_libraries(clBLAS ${OPENCL_LIBRARIES} ${MATH_LIBRARY})
 
 # CPack configuration; include the executable into the package
 install( TARGETS clBLAS
-                EXPORT Library
-                RUNTIME DESTINATION bin${SUFFIX_BIN}
-                LIBRARY DESTINATION lib${SUFFIX_LIB}
-		ARCHIVE DESTINATION lib${SUFFIX_LIB}/import
+         EXPORT Library
+         RUNTIME DESTINATION bin${SUFFIX_BIN}
+         LIBRARY DESTINATION lib${SUFFIX_LIB}
+		 ARCHIVE DESTINATION lib${SUFFIX_LIB}/import
 		)
 
 # For debug builds, include the debug runtimes into the package for testing on non-developer machines
diff --git a/src/library/bingen.cmake b/src/library/bingen.cmake
new file mode 100644
index 0000000..6a3e778
--- /dev/null
+++ b/src/library/bingen.cmake
@@ -0,0 +1,141 @@
+
+#set( bingenBinaryDir "${CMAKE_SOURCE_DIR}/library/tools/bingen/staging" )
+set (BIN_CL_TEMPLATES_TAHITI
+${CLTEMPLATE_PATH}/dgemm_hawai.cl
+${CLTEMPLATE_PATH}/dtrsm_gpu.cl
+${CLTEMPLATE_PATH}/dgemm_gcn_SmallMatrices.cl
+${CLTEMPLATE_PATH}/sgemm_gcn.cl
+${CLTEMPLATE_PATH}/sgemm_gcn_SmallMatrices.cl
+#sgemm_hawaiiSplitKernel.cl
+)
+
+set (BIN_CL_TEMPLATES_HAWAII_CL2
+${CLTEMPLATE_PATH}/dgemm_hawaiiSplitKernel.cl
+${CLTEMPLATE_PATH}/sgemm_hawaiiSplitKernel.cl
+${CLTEMPLATE_PATH}/sgemm_gcn.cl
+${CLTEMPLATE_PATH}/sgemm_gcn_SmallMatrices.cl
+)
+
+
+set (BIN_CL_TEMPLATES_BONAIRE_CL
+${CLTEMPLATE_PATH}/sgemm_hawaiiSplitKernel.cl
+${CLTEMPLATE_PATH}/sgemm_gcn.cl
+${CLTEMPLATE_PATH}/sgemm_gcn_SmallMatrices.cl
+)
+
+
+set (BIN_CL_TEMPLATES_HAWAII_CL1
+${CLTEMPLATE_PATH}/dgemm_hawai.cl
+${CLTEMPLATE_PATH}/dtrsm_gpu.cl
+${CLTEMPLATE_PATH}/dgemm_hawaiiChannelConfilct.cl
+${CLTEMPLATE_PATH}/dgemm_gcn_SmallMatrices.cl
+)
+
+MESSAGE("run bingen")
+if(UNIX)
+MESSAGE("echo $LD_LIBRARY_PATH: $ENV{LD_LIBRARY_PATH}")
+else()
+MESSAGE("echo %PATH%: $ENV{PATH}")
+endif()
+
+if(OCL_OFFLINE_BUILD_TAHITI_KERNEL)
+	if(LOAD_CL_FLAGS)
+		if(UNIX)
+		  set(ENV{LD_LIBRARY_PATH} "${ENV_PATH}")
+		  MESSAGE("LD_LIBRARY_PATH : $ENV{LD_LIBRARY_PATH}")
+		else()
+		  set(ENV{PATH} "${ENV_PATH}")
+		  MESSAGE("PATH : $ENV{PATH}")
+		endif()
+	string(REPLACE "\\" "" TAHITI_FLAG ${TAHITI_FLAG})
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "${TAHITI_FLAG}")
+	else()
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "")
+	endif()
+	MESSAGE("TAHITI AMD_OCL_BUILD_OPTIONS_APPEND FLAGS : $ENV{AMD_OCL_BUILD_OPTIONS_APPEND}")
+	MESSAGE("command : ${bingenBinaryDir}/bingen Tahiti ${BIN_CL_TEMPLATES_TAHITI}")
+	execute_process(
+		COMMAND  ${bingenBinaryDir}/bingen Tahiti ${BIN_CL_TEMPLATES_TAHITI}
+		WORKING_DIRECTORY ${bingenBinaryDir}#
+
+		)
+else()
+	MESSAGE(STATUS "Tahiti kernels will be built at runtime. Bingen not called.")
+endif()
+
+if (OCL_OFFLINE_BUILD_HAWAII_KERNEL)
+	if(LOAD_CL_FLAGS)
+		if(UNIX)
+		  set(ENV{LD_LIBRARY_PATH} "${ENV_PATH}")
+		  MESSAGE("LD_LIBRARY_PATH : $ENV{LD_LIBRARY_PATH}")
+		else()
+		  set(ENV{PATH} "${ENV_PATH}")
+		  MESSAGE("PATH : $ENV{PATH}")
+		endif()
+	string(REPLACE "\\" "" HAWAII1_FLAG ${HAWAII1_FLAG})
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "${HAWAII1_FLAG}")
+	else()
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "")
+	endif()
+	MESSAGE("HAWAII CL1 AMD_OCL_BUILD_OPTIONS_APPEND FLAGS : $ENV{AMD_OCL_BUILD_OPTIONS_APPEND}")
+	MESSAGE("command : ${bingenBinaryDir}/bingen Hawaii ${BIN_CL_TEMPLATES_HAWAII_CL1}")
+	execute_process(
+		COMMAND  ${bingenBinaryDir}/bingen Hawaii ${BIN_CL_TEMPLATES_HAWAII_CL1}
+		WORKING_DIRECTORY ${bingenBinaryDir}#
+
+	)
+else()
+	MESSAGE(STATUS "Hawaii 1 kernels will be built at runtime. Bingen not called.")
+endif()
+
+if (OCL_OFFLINE_BUILD_HAWAII_KERNEL)
+	if(LOAD_CL_FLAGS)
+		if(UNIX)
+		  set(ENV{LD_LIBRARY_PATH} "${ENV_PATH}")
+		  MESSAGE("LD_LIBRARY_PATH : $ENV{LD_LIBRARY_PATH}")
+		else()
+		  set(ENV{PATH} "${ENV_PATH}")
+		  MESSAGE("PATH : $ENV{PATH}")
+		endif()
+	string(REPLACE "\\" "" HAWAII2_FLAG ${HAWAII2_FLAG})
+	message("HAWAII2_FLAG = ${HAWAII2_FLAG}")
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "${HAWAII2_FLAG}")
+	else()
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "")
+	endif()
+	MESSAGE("HAWAII CL2 AMD_OCL_BUILD_OPTIONS_APPEND FLAGS : $ENV{AMD_OCL_BUILD_OPTIONS_APPEND}")
+	MESSAGE("ENV : $ENV{AMD_OCL_BUILD_OPTIONS_APPEND}")
+	MESSAGE("command : ${bingenBinaryDir}/bingen Hawaii ${BIN_CL_TEMPLATES_HAWAII_CL2}")
+	execute_process(
+		COMMAND  ${bingenBinaryDir}/bingen Hawaii ${BIN_CL_TEMPLATES_HAWAII_CL2}
+		WORKING_DIRECTORY ${bingenBinaryDir}#
+	)
+else()
+	MESSAGE(STATUS "Hawaii 2 kernels will be built at runtime. Bingen not called.")
+endif()
+
+if(OCL_OFFLINE_BUILD_BONAIRE_KERNEL)
+	if(LOAD_CL_FLAGS)
+		if(UNIX)
+		  set(ENV{LD_LIBRARY_PATH} "${ENV_PATH}")
+		  MESSAGE("LD_LIBRARY_PATH : $ENV{LD_LIBRARY_PATH}")
+		else()
+		  set(ENV{PATH} "${ENV_PATH}")
+		  MESSAGE("PATH : $ENV{PATH}")
+		endif()
+	string(REPLACE "\\" "" BONAIRE_FLAG ${BONAIRE_FLAG})
+	message("BONAIRE_FLAG = ${BONAIRE_FLAG}")
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "${BONAIRE_FLAG}")
+	else()
+	set(ENV{AMD_OCL_BUILD_OPTIONS_APPEND} "")
+	endif()
+	MESSAGE("BONAIRE CL AMD_OCL_BUILD_OPTIONS_APPEND FLAGS : $ENV{AMD_OCL_BUILD_OPTIONS_APPEND}")
+	MESSAGE("command : ${bingenBinaryDir}/bingen Bonaire ${BIN_CL_TEMPLATES_BONAIRE_CL}")
+	execute_process(
+		COMMAND  ${bingenBinaryDir}/bingen Bonaire ${BIN_CL_TEMPLATES_BONAIRE_CL}
+		WORKING_DIRECTORY ${bingenBinaryDir}#
+
+		)
+else()
+	MESSAGE(STATUS "Bonaire kernels will be built at runtime. Bingen not called.")
+endif()
diff --git a/src/library/blas/fill.cc b/src/library/blas/fill.cc
new file mode 100644
index 0000000..afabc29
--- /dev/null
+++ b/src/library/blas/fill.cc
@@ -0,0 +1,272 @@
+/************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <string.h>
+#include <clBLAS.h>
+#include <limits.h>
+
+#include <functor.h>
+#include <functor_selector.h>
+
+
+#define SWAP(TYPE,a,b)  do { TYPE swap_tmp_ = a ; a = b ; b = swap_tmp_ ; } while(0) 
+
+// Return true if the area starting from point (x,y) and of size (w,h) is 
+// within the array of size d1 x d2
+static int inside2d( size_t d1, size_t d2, int x, int y, size_t w, size_t h ) 
+{
+  // Very very large dimensions are likely a bug
+  size_t MAXDIM = ((size_t)INT_MAX)  ;
+  if ( d1 >= MAXDIM ) return 0 ; 
+  if ( d2 >= MAXDIM ) return 0 ;
+  if ( w  >= MAXDIM ) return 0 ; 
+  if ( h  >= MAXDIM ) return 0 ;
+
+  if ( x < 0 || x >= (int)d1 ) return 0 ;
+  size_t max_w = (size_t)(d1-x) ;
+  if ( w > max_w ) return 0 ;
+
+  if ( y < 0 || y >= (int)d2 ) return 0 ;
+  size_t max_h = (size_t)(d2-y) ;
+  if ( h > max_h ) return 0 ;
+       
+  return 1 ;
+}
+
+extern "C" 
+clblasStatus clblasFillVectorAsync( size_t nb_elem,
+    size_t element_size,
+    cl_mem A, size_t offA,
+    const void * host,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event)
+{
+
+  return (clblasStatus) clEnqueueFillBuffer(command_queue, 
+                                            A,
+                                            host,
+                                            element_size,
+                                            offA*element_size,
+                                            nb_elem*element_size,
+                                            numEventsInWaitList,
+                                            eventWaitList,
+                                            event);
+}
+
+
+
+extern "C" 
+clblasStatus clblasFillVector(
+    size_t nb_elem,
+    size_t element_size,
+    cl_mem A, size_t offA,
+    const void * host,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  cl_event event ;
+  cl_int err = clblasFillVectorAsync( 
+      nb_elem, 
+      element_size, 
+      A, offA, 
+      host, 
+      command_queue, 
+      numEventsInWaitList, eventWaitList,
+      &event) ;
+
+  if (err == clblasSuccess)  {
+    err = clWaitForEvents(1,&event) ; 
+  }
+
+  return (clblasStatus)  err ; 
+}
+
+extern "C" 
+clblasStatus clblasFillSubMatrixAsync(
+    clblasOrder order,
+    size_t element_size,
+    cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    int xA, int yA,
+    size_t nx, size_t ny,
+    const void *host,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event)
+{
+  // Transform Row-major into equivalent ColumnMajor so X becomes the contiguous dimension.  
+  if( order == clblasRowMajor )
+  {
+    SWAP(size_t, nrA, ncA);
+    SWAP(int,    xA,  yA);
+    SWAP(size_t, nx,  ny);
+  }
+
+  // Check that the specified area is within the array
+  if ( !inside2d( nrA,ncA, xA,yA , nx,  ny ) ) {
+    return clblasInvalidValue ;
+  }
+
+  // If the area to fill is contiguous then use clblasFillVector 
+  if ( nx==ldA || ny==1 ) 
+  {
+    return clblasFillVectorAsync( nx*ny, 
+                                  element_size, 
+                                  A,
+                                  offA + xA + yA*ldA, 
+                                  host,
+                                  command_queue,
+                                  numEventsInWaitList,
+                                  eventWaitList,
+                                  event) ;
+  }
+  else if (1) 
+  {
+    
+    clblasFill2DFunctor::Args args(A,
+                                   offA + xA + yA*ldA,  
+                                   nx,ny,
+                                   ldA,
+                                   element_size, 
+                                   host, 
+                                   command_queue,
+                                   numEventsInWaitList,
+                                   eventWaitList,
+                                   event) ;
+    
+    clblasFunctorSelector  * fselector = clblasFunctorSelector::find(command_queue);
+
+    clblasFill2DFunctor * functor = fselector->select_fill2d_specific(args);
+
+    if (!functor) 
+      return clblasInvalidValue ;
+    
+    cl_int err = functor->execute(args);
+    
+    functor->release();
+    return (clblasStatus) err ; 
+  } 
+  else
+  {
+    // Temporary: perform one fill per row
+    cl_int err ;
+    for( size_t i=0; i<ny ; i++ ) 
+      {
+          err =  clblasFillVectorAsync( nx , 
+                                       element_size, 
+                                       A,
+                                       offA + xA + (yA+i)*ldA, 
+                                       host,
+                                       command_queue,
+                                       numEventsInWaitList,
+                                       eventWaitList,
+                                       event) ;
+         if (err!=clblasSuccess) 
+           return (clblasStatus) err ;
+      }
+    return clblasSuccess ; 
+  }
+}
+
+extern "C" 
+clblasStatus clblasFillSubMatrix(
+    clblasOrder order,
+    size_t element_size,
+    cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    size_t nx, size_t ny,
+    const void *host,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  cl_event event ;
+  cl_int err = clblasFillSubMatrixAsync(order,
+                                        element_size,
+                                        A, offA, ldA,
+                                        nrA, ncA,
+                                        xA, yA,
+                                        nx, ny,
+                                        host,
+                                        command_queue,
+                                        numEventsInWaitList,
+                                        eventWaitList,
+                                        &event
+                                   ) ;
+
+  if (err == clblasSuccess) 
+  {
+    err = clWaitForEvents(1,&event) ; 
+  }
+
+  return (clblasStatus)err ; 
+}
+
+
+extern "C" 
+clblasStatus clblasFillMatrix( clblasOrder order,
+                               size_t element_size,
+                               cl_mem A, size_t offA, size_t ldA,
+                               size_t sxA, size_t syA,
+                               const void *host,
+                               cl_command_queue command_queue,
+                               cl_uint numEventsInWaitList,
+                               const cl_event *eventWaitList)
+{
+  return  clblasFillSubMatrix( order,
+                               element_size,
+                               A, offA, ldA,
+                               sxA, syA,
+                               0, 0,
+                               sxA, syA,
+                               host,
+                               command_queue,
+                               numEventsInWaitList,
+                               eventWaitList) ;
+}
+
+
+extern "C" 
+clblasStatus clblasFillMatrixAsync( clblasOrder order,
+                                    size_t element_size,
+                                    cl_mem A, size_t offA, size_t ldA,
+                                    size_t sxA, size_t syA,
+                                    const void *host,
+                                    cl_command_queue command_queue,
+                                    cl_uint numEventsInWaitList,
+                                    const cl_event *eventWaitList,
+                                    cl_event *event)
+{
+  
+  return clblasFillSubMatrixAsync( order,
+                                   element_size,
+                                   A, offA, ldA,
+                                   sxA, syA,
+                                   0, 0,
+                                   sxA, syA,
+                                   host,
+                                   command_queue,
+                                   numEventsInWaitList,
+                                   eventWaitList,
+                                   event) ;
+  
+}
+
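
For reference, here is a minimal host-side sketch of how the fill entry points above are meant to be called; the buffer `buf`, the queue `queue` and the chosen sizes are hypothetical and error handling is omitted:

    // Fill a 128 x 64 column-major float matrix stored in `buf` with 1.0f.
    const size_t M = 128, N = 64, ld = M;
    const float  value = 1.0f;
    clblasStatus st = clblasFillMatrix(clblasColumnMajor,
                                       sizeof(float),      // element_size
                                       buf, 0, ld,         // cl_mem, offset (in elements), leading dimension
                                       M, N,               // matrix extents
                                       &value,             // host pointer to the fill value
                                       queue, 0, NULL);    // queue, no events to wait on

The blocking variants (clblasFillVector, clblasFillSubMatrix, clblasFillMatrix) simply call their Async counterparts and wait on the returned event.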
diff --git a/src/library/blas/functor/bonaire.cc b/src/library/blas/functor/bonaire.cc
new file mode 100644
index 0000000..69b0e98
--- /dev/null
+++ b/src/library/blas/functor/bonaire.cc
@@ -0,0 +1,93 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <bonaire.h>
+//#include "gpu_dtrsm.h"
+//#include "gcn_dgemmCommon.h"
+#include "math.h"
+//#include "hawaii_dgemmChannelConflict.h"
+//#include "hawaii_dgemmSplitKernel.h"
+#include "hawaii_sgemmSplitKernel.h"
+//#include "gcn_dgemmSmallMatrices.h"
+#include "gcn_sgemmSmallMatrices.h"
+
+FunctorSelectorBonaire FunctorSelectorBonaire::instance ;
+
+
+FunctorSelectorBonaire::FunctorSelectorBonaire()
+    : clblasFunctorSelector(BONAIRE)
+{
+    
+}
+
+//
+// No DGEMM selector is specialized for Bonaire here; the DGEMM functor
+// includes above are commented out, so DGEMM falls through to the base class.
+//
+
+
+// The selector function for SGEMM on Bonaire
+clblasSgemmFunctor * FunctorSelectorBonaire::select_sgemm_specific(clblasSgemmFunctor::Args & args)
+{
+#ifdef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+	return this->clblasFunctorSelector::select_sgemm_specific(args);
+#else
+  clblasSgemmFunctor * functor;
+
+  // The split and small-matrices kernels only handle the NN, NT and TN cases.
+  bool Not_TT = ( (args.transA==clblasNoTrans && args.transB==clblasTrans)   ||
+                  (args.transA==clblasNoTrans && args.transB==clblasNoTrans) ||
+                  (args.transA==clblasTrans   && args.transB==clblasNoTrans) );
+
+  // Heuristic: use the small-matrices kernel for small or oddly tiled problems
+  // whose dimensions are multiples of 32 and whose K is a multiple of 16.
+  bool SmallMatrices = args.M*args.N < 256*256 ||
+                       ( (args.M%64!=0 && args.N%64!=0 && args.M<1900 && args.N<1900) &&
+                         (args.M%96!=0 && args.N%96!=0 && args.M<1900 && args.N<1900) );
+  SmallMatrices = SmallMatrices && (args.M%32==0 && args.N%32==0);
+  SmallMatrices = SmallMatrices && Not_TT && args.K%16==0;
+
+  // Heuristic: use the split kernel when the dimensions fit its tiling.
+  bool useSplitKernel = ( (args.M%96==0 && args.N%96==0) ||
+                          !(args.M%64==0 && args.N%64==0 && args.M<4000 && args.N<4000) ) &&
+                        args.K%16==0;
+  useSplitKernel = useSplitKernel && Not_TT;
+
+  if (args.alpha!=0)
+  {
+    if (SmallMatrices)
+    {
+      functor = clBlasGCNSgemmSmallMatricesFunctor::provide(args, "Bonaire");
+      if (functor)
+        return functor;
+    }
+    if (useSplitKernel)
+    {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+      functor = clBlashawaiiSgemmSplitKernelFunctor::provide(args, "Bonaire");
+      if (functor)
+        return functor;
+#endif
+    }
+    else
+    {
+      functor = clblasSgemmFunctorGCN::provide(args, "Bonaire");
+      if (functor)
+        return functor;
+    }
+  }
+
+  // else use the fallback implementation
+  return this->clblasFunctorSelector::select_sgemm_specific(args);
+#endif
+}
+
+
+
+
+
+
diff --git a/src/library/blas/functor/functor.cc b/src/library/blas/functor/functor.cc
new file mode 100644
index 0000000..452989d
--- /dev/null
+++ b/src/library/blas/functor/functor.cc
@@ -0,0 +1,117 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <stdio.h>
+#include <fstream>
+#include <iostream>
+#include <ios>
+
+#include <functor.h>
+
+#include <clblas-internal.h>
+
+#include <vector>
+#include <set>
+
+
+clblasFunctor::clblasFunctor()
+    : refcount(1) // implicit retain
+{
+}
+
+
+clblasFunctor::~clblasFunctor() 
+{
+}
+
+
+void clblasFunctor::retain()
+{
+    refcount.increment() ;
+}
+
+void clblasFunctor::release()
+{
+    int n = refcount.decrement() ; 
+
+    if (n==0)
+    {
+      delete this;
+    }
+}
+
+cl_int clblasFunctor::getDeviceAndContext(cl_command_queue queue,
+                                    cl_device_id & device,
+                                    cl_context & context)
+{
+  cl_int err;
+  err = getQueueContext(queue, &context);
+
+  if (err != CL_SUCCESS)
+  {
+      return err;
+  }
+
+  err = getQueueDevice(queue, &device);
+
+  if (err != CL_SUCCESS)
+  {
+      return err;
+  }
+
+  return CL_SUCCESS;
+}
+
+cl_uint clblasFunctor::getAddressBits(cl_device_id & device)
+{
+  cl_uint bitness;
+  cl_uint error = clGetDeviceInfo(device, CL_DEVICE_ADDRESS_BITS, sizeof(cl_uint), &bitness, NULL);
+  if(error==CL_SUCCESS)
+    return bitness;
+  else
+    return 32;
+}
+
+void clblasFunctor::getCLVersion(cl_device_id & device, int&major, int& minor)
+{
+  size_t size = 0;
+  cl_int success = 0;
+  major = 0;
+  minor = 0;
+
+  success = clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, 0, NULL, &size);
+  if (success == CL_SUCCESS && size != 0)
+  {
+    // The version string has the form "OpenCL C <major>.<minor> ..."
+    char* CLVersion = new char[size];
+
+    success = clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, size, CLVersion, NULL);
+    if (success == CL_SUCCESS)
+    {
+      // Extract the single-digit major and minor version numbers.
+      major = CLVersion[9]  - '0';
+      minor = CLVersion[11] - '0';
+    }
+
+    delete [] CLVersion;
+  }
+}
\ No newline at end of file
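
The retain()/release() pair above gives functors intrusive reference counting: provide() hands back an already-retained object and each caller is expected to release() it once the call is done (the fallback singletons later in this patch override both as no-ops). A small scope guard, sketched here purely as an illustration and not part of this patch, captures that discipline:

    // Hypothetical RAII helper: releases a clblasFunctor at scope exit.
    struct FunctorGuard {
        clblasFunctor * f;
        explicit FunctorGuard(clblasFunctor * functor) : f(functor) {}
        ~FunctorGuard() { if (f) f->release(); }
    };

    // Usage sketch (concrete functor types define their own execute(Args&)):
    //   FunctorGuard guard(functor);
    //   clblasStatus err = functor->execute(args);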
diff --git a/src/library/blas/functor/functor_fill.cc b/src/library/blas/functor/functor_fill.cc
new file mode 100644
index 0000000..89a49c7
--- /dev/null
+++ b/src/library/blas/functor/functor_fill.cc
@@ -0,0 +1,156 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+#include <string>
+
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor_fill.h>
+#include <binary_lookup.h>
+
+// The internal cache of clblasFill2DFunctorDefault
+typedef clblasFunctorCache<clblasFill2DFunctorDefault, int> Cache;
+static Cache cache ; 
+
+// Generic fill kernel: requires the macro TYPE to be defined to the element type
+static const char FILL2D_KERNEL_SRC[] = "\n\
+__kernel void fill2d( __global TYPE * A, int offA, int ldA, TYPE value) \n\
+{\n\
+  A[ offA + get_global_id(0) + get_global_id(1) * ldA ] = value ; \n\
+}\n\
+" ;
+
+
+
+clblasFill2DFunctorDefault::clblasFill2DFunctorDefault(cl_context ctxt, 
+                                                       cl_device_id dev,
+                                                       int elemsize, 
+                                                       cl_int & err) :  m_elemsize(elemsize), m_program(0)
+{
+
+  BinaryLookup bl(ctxt, dev, "clblasFill2DFunctorDefault");
+  bl.variantInt(elemsize);
+  
+  if ( bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    m_program =  bl.getProgram();
+  }
+  else
+  {
+
+    const char * options;
+    switch( elemsize ) 
+    {
+    case 1:  options = "-DTYPE=char";     break ;
+    case 2:  options = "-DTYPE=short";    break ; 
+    case 4:  options = "-DTYPE=int";      break ;  //  or  'float'
+    case 8:  options = "-DTYPE=long";     break ;  //  or  'double' or 'complex float'
+    case 16: options = "-DTYPE=float4";   break ;  //  or  'complex double'
+    default: options = NULL ; // shall never happen  
+    }
+
+    m_program = BinaryLookup::buildProgramFromSource(FILL2D_KERNEL_SRC, ctxt, dev, err, options);
+    
+    if (m_program)
+    {
+      bl.setProgram(m_program);
+      bl.populateCache();
+    }
+    
+  }
+
+}
+
+
+clblasFill2DFunctorDefault::~clblasFill2DFunctorDefault()
+{
+  if (this->m_program) {
+    clReleaseProgram( this->m_program ) ; 
+  } 
+}
+
+clblasStatus 
+clblasFill2DFunctorDefault::execute(Args & args)
+{
+  cl_int err;
+  cl_kernel kernel = clCreateKernel( this->m_program, "fill2d",  &err);
+  if (err != CL_SUCCESS) return clblasStatus(err) ; 
+  clblasFunctor::setKernelArg<cl_mem> (kernel, 0, args.A);
+  clblasFunctor::setKernelArg<int> (kernel, 1, args.offA);
+  clblasFunctor::setKernelArg<int> (kernel, 2, args.ldA);
+  clblasFunctor::setKernelArgPtr      (kernel, 3, args.elemsize, args.value);
+  
+  size_t globalThreads[2] = { args.m , args.n };
+
+  err = clEnqueueNDRangeKernel(args.queue, kernel, 2, NULL,
+                               globalThreads, NULL , 
+                               args.numEventsInWaitList, args.eventWaitList, args.events);
+
+  clReleaseKernel(kernel) ;
+  return clblasStatus(err) ;
+}
+
+
+clblasFill2DFunctorDefault * 
+clblasFill2DFunctorDefault::provide(Args & args)
+{
+  // The current implementation only supports the common scalar data
+  // sizes from 'char' (1) to 'double complex' (16).
+  switch(args.elemsize) 
+    {
+    case 1:  
+    case 2:  
+    case 4:   
+    case 8:   
+    case 16:  
+      break ;
+    default:
+      return NULL ;
+    }
+
+  cl_device_id dev;
+  cl_context   ctxt;
+  cl_int err = clblasFunctor::getDeviceAndContext(args.queue, dev, ctxt);
+
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+
+  Cache::Lookup lookup(cache, ctxt, dev, args.elemsize ) ;
+
+  if ( lookup.ok() ) {
+    clblasFill2DFunctorDefault * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+ 
+  clblasFill2DFunctorDefault * functor = new clblasFill2DFunctorDefault(ctxt, dev, args.elemsize, err);
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+    
+}
diff --git a/src/library/blas/functor/functor_selector.cc b/src/library/blas/functor/functor_selector.cc
new file mode 100644
index 0000000..676ad35
--- /dev/null
+++ b/src/library/blas/functor/functor_selector.cc
@@ -0,0 +1,342 @@
+
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <clblas-internal.h>
+#include <functor_selector.h>
+#include <cassert>
+
+#include <functor_xscal_generic.h>
+
+// This is the selector used by default for 'unknown' targets 
+clblasFunctorSelector clblasFunctorSelector::default_instance ;
+
+typedef std::map<DeviceChip, clblasFunctorSelector*> clblasFunctorSelectorMap ;
+
+//
+// Provide a global map in which all clblasFunctorSelector instances register
+// themselves from their constructors (with the exception of the default one).
+// 
+// Remark: For that, we use the "construct on first use" idiom, to avoid the 
+//         infamous "static initialization order fiasco".
+//         See for example  http://www.parashift.com/c++-faq/static-init-order.html
+//
+static clblasFunctorSelectorMap & 
+getMap() 
+{
+  static clblasFunctorSelectorMap * the_map = new clblasFunctorSelectorMap ;
+  return * the_map ;
+}
+
+
+// Constructor for the non-default selector 
+clblasFunctorSelector::clblasFunctorSelector(DeviceChip chip)
+{
+    clblasFunctorSelectorMap::iterator it = getMap().find(chip);
+
+    if (it != getMap().end())
+    {
+       assert(false);
+    }
+
+    getMap()[chip] = this;
+}
+
+// Constructor for the default selector 
+clblasFunctorSelector::clblasFunctorSelector()
+{
+}
+
+clblasFunctorSelector * 
+clblasFunctorSelector::find(cl_command_queue queue) 
+{
+    cl_device_id device;
+    cl_int status = getQueueDevice(queue, &device);
+    assert( status == CL_SUCCESS );
+    return clblasFunctorSelector::find(device);
+}
+
+clblasFunctorSelector * 
+clblasFunctorSelector::find(cl_device_id device) 
+{
+    TargetDevice td;
+    td.id = device;
+    cl_int status = identifyDevice(&td);
+    assert( status == CL_SUCCESS );
+    return clblasFunctorSelector::find(td.ident.chip);
+}
+
+clblasFunctorSelector * 
+clblasFunctorSelector::find(DeviceChip chip) 
+{
+    clblasFunctorSelectorMap & the_map = getMap();
+    clblasFunctorSelectorMap::iterator it = the_map.find(chip);
+    if (it != the_map.end())
+    {
+        return it->second;
+    }
+    else 
+    {
+        return &default_instance ;
+    }
+}
+
+int clblasFunctorSelector::FindFirePro(cl_device_id device)
+{
+  char cardName [1024];
+  cl_int error = clGetDeviceInfo(device, CL_DEVICE_BOARD_NAME_AMD, sizeof(cardName), cardName, NULL);
+  if (error!=CL_SUCCESS)
+    return 0;
+  else if (strstr (cardName, "FirePro"))
+    return 1;
+  else
+    return 0;
+}
+
+// =================================================================================
+//
+// XGEMM
+//
+// =================================================================================
+
+
+clblasSgemmFunctor * 
+clblasFunctorSelector::select_sgemm_generic()  
+{ 
+    return clblasSgemmFunctorFallback::provide();
+}  
+
+clblasDgemmFunctor * 
+clblasFunctorSelector::select_dgemm_generic()  
+{ 
+    return clblasDgemmFunctorFallback::provide();
+}  
+
+
+clblasCgemmFunctor * 
+clblasFunctorSelector::select_cgemm_generic()  
+{ 
+    return clblasCgemmFunctorFallback::provide();
+}  
+
+clblasZgemmFunctor * 
+clblasFunctorSelector::select_zgemm_generic()  
+{ 
+    return clblasZgemmFunctorFallback::provide();
+}  
+
+
+clblasSgemmFunctor * 
+clblasFunctorSelector::select_sgemm_specific(clblasSgemmFunctor::Args &)
+{ 
+    return this->select_sgemm_generic() ;
+}
+
+clblasDgemmFunctor * 
+clblasFunctorSelector::select_dgemm_specific(clblasDgemmFunctor::Args &)
+{ 
+    return this->select_dgemm_generic() ;
+}
+
+clblasCgemmFunctor * 
+clblasFunctorSelector::select_cgemm_specific(clblasCgemmFunctor::Args &)
+{ 
+    return this->select_cgemm_generic() ;
+}
+
+clblasZgemmFunctor * 
+clblasFunctorSelector::select_zgemm_specific(clblasZgemmFunctor::Args &)
+{ 
+    return this->select_zgemm_generic() ;
+}
+
+
+// =================================================================================
+//
+// XTRSM
+//
+// =================================================================================
+
+
+clblasStrsmFunctor * 
+clblasFunctorSelector::select_strsm_generic()  
+{ 
+    return clblasStrsmFunctorFallback::provide();
+}  
+
+clblasDtrsmFunctor * 
+clblasFunctorSelector::select_dtrsm_generic()  
+{ 
+    return clblasDtrsmFunctorFallback::provide();
+}  
+
+
+clblasCtrsmFunctor * 
+clblasFunctorSelector::select_ctrsm_generic()  
+{ 
+    return clblasCtrsmFunctorFallback::provide();
+}  
+
+clblasZtrsmFunctor * 
+clblasFunctorSelector::select_ztrsm_generic()  
+{ 
+    return clblasZtrsmFunctorFallback::provide();
+}  
+
+
+clblasStrsmFunctor * 
+clblasFunctorSelector::select_strsm_specific(clblasStrsmFunctor::Args &)
+{ 
+    return this->select_strsm_generic() ;
+}
+
+clblasDtrsmFunctor * 
+clblasFunctorSelector::select_dtrsm_specific(clblasDtrsmFunctor::Args &)
+{ 
+    return this->select_dtrsm_generic() ;
+}
+
+clblasCtrsmFunctor * 
+clblasFunctorSelector::select_ctrsm_specific(clblasCtrsmFunctor::Args &)
+{ 
+    return this->select_ctrsm_generic() ;
+}
+
+clblasZtrsmFunctor * 
+clblasFunctorSelector::select_ztrsm_specific(clblasZtrsmFunctor::Args &)
+{ 
+    return this->select_ztrsm_generic() ;
+}
+
+
+// =================================================================================
+//
+// XSCAL
+//
+// =================================================================================
+
+
+clblasSscalFunctor * 
+clblasFunctorSelector::select_sscal_generic(clblasSscalFunctor::Args & args)  
+{ 
+  clblasSscalFunctor * functor;
+  functor = clblasSscalFunctorGeneric::provide(args);
+  if(functor) return functor;
+
+  return clblasSscalFunctorFallback::provide();
+}  
+
+clblasDscalFunctor * 
+clblasFunctorSelector::select_dscal_generic(clblasDscalFunctor::Args & args)  
+{ 
+  clblasDscalFunctor * functor;
+  functor = clblasDscalFunctorGeneric::provide(args);
+  if(functor) return functor;
+
+  return clblasDscalFunctorFallback::provide();
+}  
+
+
+clblasCscalFunctor * 
+clblasFunctorSelector::select_cscal_generic(clblasCscalFunctor::Args & args)  
+{ 
+  clblasCscalFunctor * functor;
+  functor = clblasCscalFunctorGeneric::provide(args);
+  if(functor) return functor;
+
+  return clblasCscalFunctorFallback::provide();
+}  
+
+clblasZscalFunctor * 
+clblasFunctorSelector::select_zscal_generic(clblasZscalFunctor::Args & args)  
+{ 
+  clblasZscalFunctor * functor;
+  functor = clblasZscalFunctorGeneric::provide(args);
+  if(functor) return functor;
+
+  return clblasZscalFunctorFallback::provide();
+}
+
+clblasCsscalFunctor * 
+clblasFunctorSelector::select_csscal_generic(clblasCsscalFunctor::Args & args)  
+{ 
+  clblasCsscalFunctor * functor;
+  functor = clblasCsscalFunctorGeneric::provide(args);
+  if(functor) return functor;
+  
+  return clblasCsscalFunctorFallback::provide();
+}  
+
+clblasZdscalFunctor * 
+clblasFunctorSelector::select_zdscal_generic(clblasZdscalFunctor::Args & args)  
+{ 
+  clblasZdscalFunctor * functor;
+  functor = clblasZdscalFunctorGeneric::provide(args);
+  if(functor) return functor;
+
+  return clblasZdscalFunctorFallback::provide();
+}
+
+
+clblasSscalFunctor * 
+clblasFunctorSelector::select_sscal_specific(clblasSscalFunctor::Args & args)
+{ 
+    return this->select_sscal_generic(args) ;
+}
+
+clblasDscalFunctor * 
+clblasFunctorSelector::select_dscal_specific(clblasDscalFunctor::Args & args)
+{ 
+    return this->select_dscal_generic(args) ;
+}
+
+clblasCscalFunctor * 
+clblasFunctorSelector::select_cscal_specific(clblasCscalFunctor::Args & args)
+{ 
+    return this->select_cscal_generic(args) ;
+}
+
+clblasZscalFunctor * 
+clblasFunctorSelector::select_zscal_specific(clblasZscalFunctor::Args & args)
+{ 
+    return this->select_zscal_generic(args) ;
+}
+
+clblasCsscalFunctor * 
+clblasFunctorSelector::select_csscal_specific(clblasCsscalFunctor::Args & args)
+{ 
+    return this->select_csscal_generic(args) ;
+}
+
+clblasZdscalFunctor * 
+clblasFunctorSelector::select_zdscal_specific(clblasZdscalFunctor::Args & args)
+{ 
+    return this->select_zdscal_generic(args) ;
+}
+
+
+// =================================================================================
+//
+// FILL2D
+//
+// =================================================================================
+
+
+clblasFill2DFunctor * 
+clblasFunctorSelector::select_fill2d_specific(clblasFill2DFunctor::Args & args)
+{ 
+  return clblasFill2DFunctorDefault::provide(args);
+}
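
Putting the selector together with the functors, the dispatch pattern used by the library entry points looks roughly like the sketch below (shown for SSCAL; the construction of clblasSscalFunctor::Args is assumed here, since the header defining it is not part of this excerpt):

    // 'args' is a fully populated clblasSscalFunctor::Args (alpha, X, N, offx,
    // incx, queue and event list), as built by the clblasSscal entry point.
    clblasFunctorSelector * selector = clblasFunctorSelector::find(args.queue);
    clblasSscalFunctor    * functor  = selector->select_sscal_specific(args);
    if (!functor)
        return clblasInvalidValue;
    clblasStatus err = functor->execute(args);
    functor->release();   // selectors are static singletons and are never released
    return err;

fill.cc earlier in this patch follows the same pattern for the 2D fill functor.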
diff --git a/src/library/blas/functor/functor_xgemm.cc b/src/library/blas/functor/functor_xgemm.cc
new file mode 100644
index 0000000..e5fc21f
--- /dev/null
+++ b/src/library/blas/functor/functor_xgemm.cc
@@ -0,0 +1,323 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor_xgemm.h>
+
+//
+// Common part of all XGEMM implementations using the old Solver infrastructure 
+//
+static clblasStatus
+doGemm(
+    CLBlasKargs *kargs,
+    clblasOrder order,
+    clblasTranspose transA,
+    clblasTranspose transB,
+    size_t M,
+    size_t N,
+    size_t K,
+    const cl_mem A,
+    size_t offA,
+    size_t lda,
+    const cl_mem B,
+    size_t offB,
+    size_t ldb,
+    cl_mem C,
+    size_t offC,
+    size_t ldc,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+    cl_int err;
+    ListHead seq;
+    clblasStatus retCode = clblasSuccess;
+
+    if (!clblasInitialized) {
+        return clblasNotInitialized;
+    }
+
+    /* Validate arguments */
+
+    if ((retCode = checkMemObjects(A, B, C, true, A_MAT_ERRSET, B_MAT_ERRSET, C_MAT_ERRSET))) {
+        return retCode;
+    }
+    if (K != 0) {
+        if ((retCode = checkMatrixSizes(kargs->dtype, order, transA, M,
+                                        K, A, offA, lda, A_MAT_ERRSET ))) {
+            return retCode;
+        }
+        if ((retCode = checkMatrixSizes(kargs->dtype, order, transB,
+                                        K, N, B, offB, ldb, B_MAT_ERRSET ))) {
+            return retCode;
+        }
+    }
+    if ((retCode = checkMatrixSizes(kargs->dtype, order, clblasNoTrans,
+                                    M, N, C, offC, ldc, C_MAT_ERRSET ))) {
+            return retCode;
+    }
+
+	#ifdef DEBUG_2
+	printf("DoGemm being called...\n");
+	#endif
+    kargs->order = order;
+    kargs->transA = transA;
+    kargs->transB = transB;
+    kargs->M = M;
+    kargs->N = N;
+    kargs->K = K;
+    kargs->A = A;
+    kargs->offA = offA;
+    kargs->lda.matrix = lda;
+    kargs->B = B;
+    kargs->offBX = offB;
+    kargs->ldb.matrix = ldb;
+    kargs->C = C;
+    kargs->offCY = offC;
+    kargs->ldc.matrix = ldc;
+
+    kargs->offsetM = 0;
+    kargs->offsetN = 0;
+    kargs->scimage[0] = 0;
+    kargs->scimage[1] = 0;
+
+    listInitHead(&seq);
+    err = makeSolutionSeq(CLBLAS_GEMM, kargs, numCommandQueues, commandQueues,
+        numEventsInWaitList, eventWaitList, events, &seq);
+    if (err == CL_SUCCESS) {
+        err = executeSolutionSeq(&seq);
+    }
+
+    freeSolutionSeq(&seq);
+
+    return (clblasStatus)err;
+}
+
+
+
+// =================================================================================
+//
+// class clblasSgemmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasSgemmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_FLOAT;
+  kargs.alpha.argFloat = args.alpha;
+  kargs.beta.argFloat  = args.beta;
+  
+  return doGemm(&kargs, 
+                args.order, 
+                args.transA, args.transB, 
+                args.M, args.N, args.K, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                args.C, args.offC, args.ldc, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+}
+
+clblasSgemmFunctorFallback * 
+clblasSgemmFunctorFallback::provide ()
+{
+    static clblasSgemmFunctorFallback sgemm_fallback; // The unique instance of clblasSgemmFunctorFallback
+    return & sgemm_fallback;
+}
+
+
+void 
+clblasSgemmFunctorFallback::retain()
+{
+  // clblasSgemmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasSgemmFunctorFallback::release()
+{
+  // clblasSgemmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasDgemmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasDgemmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_DOUBLE;
+  kargs.alpha.argDouble = args.alpha;
+  kargs.beta.argDouble  = args.beta;
+  
+  return doGemm(&kargs, 
+                args.order, 
+                args.transA, args.transB, 
+                args.M, args.N, args.K, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                args.C, args.offC, args.ldc, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+}
+
+clblasDgemmFunctorFallback * 
+clblasDgemmFunctorFallback::provide ()
+{
+  static clblasDgemmFunctorFallback dgemm_fallback;   // The unique instance of clblasDgemmFunctorFallback
+  return & dgemm_fallback;
+}
+
+
+void 
+clblasDgemmFunctorFallback::retain()
+{
+  // clblasDgemmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasDgemmFunctorFallback::release()
+{
+  // clblasDgemmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
+
+// =================================================================================
+//
+// class clblasCgemmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasCgemmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_COMPLEX_FLOAT;
+  kargs.alpha.argFloatComplex = args.alpha;
+  kargs.beta.argFloatComplex  = args.beta;
+  
+  return doGemm(&kargs, 
+                args.order, 
+                args.transA, args.transB, 
+                args.M, args.N, args.K, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                args.C, args.offC, args.ldc, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+}
+
+clblasCgemmFunctorFallback * 
+clblasCgemmFunctorFallback::provide ()
+{
+  static clblasCgemmFunctorFallback cgemm_fallback;   // The unique instance of clblasCgemmFunctorFallback
+  return & cgemm_fallback;
+}
+
+
+void 
+clblasCgemmFunctorFallback::retain()
+{
+  // clblasCgemmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasCgemmFunctorFallback::release()
+{
+  // clblasCgemmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
+
+// =================================================================================
+//
+// class clblasZgemmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasZgemmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_COMPLEX_DOUBLE;
+  kargs.alpha.argDoubleComplex = args.alpha;
+  kargs.beta.argDoubleComplex  = args.beta;
+  
+  return doGemm(&kargs, 
+                args.order, 
+                args.transA, args.transB, 
+                args.M, args.N, args.K, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                args.C, args.offC, args.ldc, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+}
+
+clblasZgemmFunctorFallback * 
+clblasZgemmFunctorFallback::provide ()
+{
+  static clblasZgemmFunctorFallback zgemm_fallback;   // The unique instance of clblasZgemmFunctorFallback
+  return & zgemm_fallback;
+}
+
+
+void 
+clblasZgemmFunctorFallback::retain()
+{
+  // clblasZgemmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasZgemmFunctorFallback::release()
+{
+  // clblasZgemmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
diff --git a/src/library/blas/functor/functor_xscal.cc b/src/library/blas/functor/functor_xscal.cc
new file mode 100644
index 0000000..1615499
--- /dev/null
+++ b/src/library/blas/functor/functor_xscal.cc
@@ -0,0 +1,410 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor_xscal.h>
+
+static clblasStatus
+doScal(
+       CLBlasKargs *kargs,
+       size_t N,
+       cl_mem X,
+       size_t offx,
+       int incx,
+       cl_uint numCommandQueues,
+       cl_command_queue *commandQueues,
+       cl_uint numEventsInWaitList,
+       const cl_event *eventWaitList,
+       cl_event *events)
+{
+  cl_int err;
+  ListHead seq;
+  clblasStatus retCode = clblasSuccess;
+
+  if (!clblasInitialized) {
+    return clblasNotInitialized;
+  }
+
+  /* Validate arguments */
+
+  retCode = checkMemObjects(X, X, X, false, X_VEC_ERRSET, X_VEC_ERRSET, X_VEC_ERRSET );
+  if (retCode) {
+    printf("Invalid mem object..\n");
+    return retCode;
+  }
+
+  // Check whether enough memory was allocated
+
+  if ((retCode = checkVectorSizes(kargs->dtype, N, X, offx, incx, X_VEC_ERRSET))) {
+    printf("Invalid Size for X\n");
+    return retCode;
+  }
+  ///////////////////////////////////////////////////////////////
+
+  if ((commandQueues == NULL) || (numCommandQueues == 0))
+    {
+      return clblasInvalidValue;
+    }
+
+  /* numCommandQueues will be hardcoded to 1 as of now. No multi-gpu support */
+  numCommandQueues = 1;
+  if (commandQueues[0] == NULL)
+    {
+      return clblasInvalidCommandQueue;
+    }
+
+  if ((numEventsInWaitList !=0) && (eventWaitList == NULL))
+    {
+      return clblasInvalidEventWaitList;
+    }
+
+  kargs->N = N;
+  kargs->A = X;
+  kargs->offBX = offx;
+  kargs->ldb.vector = incx;	// Will be using this as incx
+
+  if(incx < 0) {    // According to Netlib - return for negative incx
+    return clblasSuccess;
+  }
+
+  listInitHead(&seq);
+  err = makeSolutionSeq(CLBLAS_SCAL, kargs, numCommandQueues, commandQueues,
+                        numEventsInWaitList, eventWaitList, events, &seq);
+  if (err == CL_SUCCESS) {
+    err = executeSolutionSeq(&seq);
+  }
+
+  freeSolutionSeq(&seq);
+
+  return (clblasStatus)err;
+}
+
+
+// =================================================================================
+//
+// class clblasSscalFunctorFallback
+//
+// =================================================================================
+
+static clblasSscalFunctorFallback sscal_fallback;
+
+clblasStatus clblasSscalFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_FLOAT;
+  kargs.alpha.argFloat = args.alpha;
+
+  return doScal(&kargs,
+                args.N,
+                args.X,
+                args.offx,
+                args.incx,
+                1,
+                &args.queue,
+                args.numEventsInWaitList,
+                args.eventWaitList,
+                args.events);
+
+}
+
+
+clblasSscalFunctorFallback * clblasSscalFunctorFallback::provide ()
+{
+    static clblasSscalFunctorFallback sscal_fallback;
+    return & sscal_fallback;
+}
+
+
+void clblasSscalFunctorFallback::retain()
+{
+  // clblasSscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+void clblasSscalFunctorFallback::release()
+{
+  // clblasSscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+
+
+// =================================================================================
+//
+// class clblasDscalFunctorFallback
+//
+// =================================================================================
+
+static clblasDscalFunctorFallback dscal_fallback;
+
+clblasStatus clblasDscalFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_DOUBLE;
+  kargs.alpha.argDouble = args.alpha;
+
+  return doScal(&kargs,
+                args.N,
+                args.X,
+                args.offx,
+                args.incx,
+                1,
+                &args.queue,
+                args.numEventsInWaitList,
+                args.eventWaitList,
+                args.events);
+
+}
+
+
+clblasDscalFunctorFallback * clblasDscalFunctorFallback::provide ()
+{
+    static clblasDscalFunctorFallback dscal_fallback;
+    return & dscal_fallback;
+}
+
+
+void clblasDscalFunctorFallback::retain()
+{
+  // clblasDscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+void clblasDscalFunctorFallback::release()
+{
+  // clblasDscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasCscalFunctorFallback
+//
+// =================================================================================
+
+static clblasCscalFunctorFallback cscal_fallback;
+
+clblasStatus clblasCscalFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_COMPLEX_FLOAT;
+  kargs.alpha.argFloatComplex = args.alpha;
+
+  return doScal(&kargs,
+                args.N,
+                args.X,
+                args.offx,
+                args.incx,
+                1,
+                &args.queue,
+                args.numEventsInWaitList,
+                args.eventWaitList,
+                args.events);
+
+}
+
+
+clblasCscalFunctorFallback * clblasCscalFunctorFallback::provide ()
+{
+    static clblasCscalFunctorFallback cscal_fallback;
+    return & cscal_fallback;
+}
+
+
+void clblasCscalFunctorFallback::retain()
+{
+  // clblasCscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+void clblasCscalFunctorFallback::release()
+{
+  // clblasCscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasZscalFunctorFallback
+//
+// =================================================================================
+
+static clblasZscalFunctorFallback zscal_fallback;
+
+clblasStatus clblasZscalFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_COMPLEX_DOUBLE;
+  kargs.alpha.argDoubleComplex = args.alpha;
+
+  return doScal(&kargs,
+                args.N,
+                args.X,
+                args.offx,
+                args.incx,
+                1,
+                &args.queue,
+                args.numEventsInWaitList,
+                args.eventWaitList,
+                args.events);
+
+}
+
+
+clblasZscalFunctorFallback * clblasZscalFunctorFallback::provide ()
+{
+    static clblasZscalFunctorFallback zscal_fallback;
+    return & zscal_fallback;
+}
+
+
+void clblasZscalFunctorFallback::retain()
+{
+  // clblasZscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+void clblasZscalFunctorFallback::release()
+{
+  // clblasZscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasCsscalFunctorFallback
+//
+// =================================================================================
+
+static clblasCsscalFunctorFallback csscal_fallback;
+
+clblasStatus clblasCsscalFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+  FloatComplex fAlpha;
+  CREAL(fAlpha) = args.alpha;
+  CIMAG(fAlpha) = 0.0f;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.alpha.argFloatComplex = fAlpha;
+  kargs.dtype = TYPE_COMPLEX_FLOAT;
+
+  return doScal(&kargs,
+                args.N,
+                args.X,
+                args.offx,
+                args.incx,
+                1,
+                &args.queue,
+                args.numEventsInWaitList,
+                args.eventWaitList,
+                args.events);
+
+}
+
+
+clblasCsscalFunctorFallback * clblasCsscalFunctorFallback::provide ()
+{
+    static clblasCsscalFunctorFallback csscal_fallback;
+    return & csscal_fallback;
+}
+
+
+void clblasCsscalFunctorFallback::retain()
+{
+  // clblasCsscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+void clblasCsscalFunctorFallback::release()
+{
+  // clblasCsscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasZdscalFunctorFallback
+//
+// =================================================================================
+
+static clblasZdscalFunctorFallback zdscal_fallback;
+
+clblasStatus clblasZdscalFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+  DoubleComplex fAlpha;
+  CREAL(fAlpha) = args.alpha;
+  CIMAG(fAlpha) = 0.0f;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.alpha.argDoubleComplex = fAlpha;
+  kargs.dtype = TYPE_COMPLEX_DOUBLE;
+
+  return doScal(&kargs,
+                args.N,
+                args.X,
+                args.offx,
+                args.incx,
+                1,
+                &args.queue,
+                args.numEventsInWaitList,
+                args.eventWaitList,
+                args.events);
+
+}
+
+
+clblasZdscalFunctorFallback * clblasZdscalFunctorFallback::provide ()
+{
+    static clblasZdscalFunctorFallback zdscal_fallback;
+    return & zdscal_fallback;
+}
+
+
+void clblasZdscalFunctorFallback::retain()
+{
+  // clblasZdscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
+
+
+void clblasZdscalFunctorFallback::release()
+{
+  // clblasZdscalFunctorFallback has a single global instance
+  // and shall never be freed
+}
diff --git a/src/library/blas/functor/functor_xscal_generic.cc b/src/library/blas/functor/functor_xscal_generic.cc
new file mode 100644
index 0000000..971ae9a
--- /dev/null
+++ b/src/library/blas/functor/functor_xscal_generic.cc
@@ -0,0 +1,439 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor_xscal_generic.h>
+#include <binary_lookup.h>
+
+#include <kprintf.hpp>
+#include <scal.clT>
+
+
+template <class FUNCTOR>
+static cl_program xcalCreateProgram(cl_context ctxt, 
+                                    cl_device_id dev,
+                                    char type,
+                                    const char* functorName,
+                                    const typename FUNCTOR::Data & data,
+                                    cl_int & err)
+{
+  BinaryLookup bl(ctxt, dev, functorName);
+
+  bl.variantInt(data.vecLen);
+  bl.variantInt(data.doVLOAD);
+  bl.variantInt(data.noUnity);
+
+  if ( bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    return bl.getProgram();
+  }
+  else
+  {
+    char tempTemplate[32*1024];
+    char buf         [32*1024];
+    cl_program scalProgram;
+
+    strcpy( tempTemplate, (char*)scal_kernel );
+    kprintf kobj( type, data.vecLen, data.doVLOAD, data.doVLOAD);
+    kobj.spit((char*)buf, tempTemplate);
+
+    const char * options;
+    if(data.noUnity)
+    {
+      options = "-DINCX_NONUNITY";
+    }else{
+      options = "";
+    }
+
+    scalProgram = BinaryLookup::buildProgramFromSource(buf, ctxt, dev, err , options);
+    
+    if(scalProgram)
+    {
+      bl.setProgram(scalProgram);
+      bl.populateCache();
+    }
+    
+    return scalProgram;
+  }
+
+}
+
+
+template <typename TA>
+static clblasStatus xscalExecute(cl_command_queue queue, 
+                                 cl_program program, 
+                                 const char * kernelName, 
+                                 TA alpha, 
+                                 cl_mem X, 
+                                 unsigned int N, 
+                                 unsigned int offx, 
+                                 int incx,
+                                 size_t nThreads,
+                                 cl_uint numEventsInWaitList,
+                                 const cl_event *eventWaitList,
+                                 cl_event *events)
+{
+  cl_int err;
+  cl_kernel kernel = clCreateKernel( program, kernelName,  &err);
+  if (err != CL_SUCCESS) return clblasStatus(err) ; 
+
+  clblasFunctor::setKernelArg<TA>     (kernel, 0, alpha);
+  clblasFunctor::setKernelArg<cl_mem> (kernel, 1, X);
+  clblasFunctor::setKernelArg<unsigned int>   (kernel, 2, N);
+  clblasFunctor::setKernelArg<unsigned int>   (kernel, 3, offx);
+  clblasFunctor::setKernelArg<int>    (kernel, 4, incx);
+  
+  size_t globalThreads[1] = { nThreads };
+
+  err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL,
+                               globalThreads, NULL , 
+                               numEventsInWaitList, eventWaitList, events);
+
+  clReleaseKernel(kernel) ;
+  return clblasStatus(err) ;
+}
+
+
+template <class FUNCTOR>
+static FUNCTOR * xscalProvide(typename FUNCTOR::Args & args)
+{
+  cl_device_id dev;
+  cl_context   ctxt;
+  cl_int err = clblasFunctor::getDeviceAndContext(args.queue, dev, ctxt);
+
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+
+  unsigned int vecLen  = 1 ;      //to customize according to the device and args
+  bool         doVLOAD = false ;  //TO DO (see scal_reg.cpp)
+  bool         noUnity = (args.incx != 1) ;
+
+
+  typename FUNCTOR::Data data = { vecLen , doVLOAD , noUnity};
+
+  typename FUNCTOR::Cache::Lookup lookup(FUNCTOR::cache, ctxt, dev, data ) ;
+
+  if ( lookup.ok() ){
+    FUNCTOR * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+ 
+  FUNCTOR * functor = new FUNCTOR(ctxt, dev, data, err);
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+}
+
+
+// =================================================================================
+//
+// class clblasSscalFunctorGeneric
+//
+// =================================================================================
+
+clblasSscalFunctorGeneric::clblasSscalFunctorGeneric(cl_context ctxt, 
+                                                     cl_device_id dev,
+                                                     const Data & data,
+                                                     cl_int & err) : program(0)
+{
+  this->program = xcalCreateProgram<clblasSscalFunctorGeneric>(ctxt, dev, 'S', "clblasSscalFunctorGeneric", data, err);
+}
+
+clblasSscalFunctorGeneric::~clblasSscalFunctorGeneric()
+{
+  if (this->program) {
+    clReleaseProgram( this->program ) ; 
+  } 
+}
+
+clblasStatus clblasSscalFunctorGeneric::execute(Args & args)
+{
+  size_t nThreads = args.N; //to customize according to the device, data and args
+  return xscalExecute<cl_float>(args.queue, 
+                                this->program, 
+                                "Sscal_kernel",
+                                args.alpha, 
+                                args.X, 
+                                args.N, 
+                                args.offx, 
+                                args.incx,
+                                nThreads,
+                                args.numEventsInWaitList,
+                                args.eventWaitList,
+                                args.events);
+}
+
+clblasSscalFunctorGeneric::Cache clblasSscalFunctorGeneric::cache;
+
+clblasSscalFunctorGeneric * clblasSscalFunctorGeneric::provide (clblasSscalFunctor::Args & args)
+{
+  return xscalProvide<clblasSscalFunctorGeneric>(args);
+}
+
+
+
+// =================================================================================
+//
+// class clblasDscalFunctorGeneric
+//
+// =================================================================================
+
+clblasDscalFunctorGeneric::clblasDscalFunctorGeneric(cl_context ctxt, 
+                                                     cl_device_id dev,
+                                                     const Data & data,
+                                                     cl_int & err) : program(0)
+{
+  this->program = xcalCreateProgram<clblasDscalFunctorGeneric>(ctxt, dev, 'D', "clblasDscalFunctorGeneric", data, err);
+}
+
+clblasDscalFunctorGeneric::~clblasDscalFunctorGeneric()
+{
+  if (this->program) {
+    clReleaseProgram( this->program ) ; 
+  } 
+}
+
+clblasStatus clblasDscalFunctorGeneric::execute(Args & args)
+{
+  size_t nThreads = args.N; //to customize according to the device, data and args
+  return xscalExecute<cl_double>(args.queue, 
+                                 this->program, 
+                                 "Dscal_kernel",
+                                 args.alpha, 
+                                 args.X, 
+                                 args.N, 
+                                 args.offx, 
+                                 args.incx,
+                                 nThreads,
+                                 args.numEventsInWaitList,
+                                 args.eventWaitList,
+                                 args.events);
+}
+
+clblasDscalFunctorGeneric::Cache clblasDscalFunctorGeneric::cache;
+
+clblasDscalFunctorGeneric * clblasDscalFunctorGeneric::provide (clblasDscalFunctor::Args & args)
+{
+  return xscalProvide<clblasDscalFunctorGeneric>(args);
+}
+
+
+// =================================================================================
+//
+// class clblasCscalFunctorGeneric
+//
+// =================================================================================
+
+clblasCscalFunctorGeneric::clblasCscalFunctorGeneric(cl_context ctxt, 
+                                                     cl_device_id dev,
+                                                     const Data & data,
+                                                     cl_int & err) : program(0)
+{
+  this->program = xcalCreateProgram<clblasCscalFunctorGeneric>(ctxt, dev, 'C', "clblasCscalFunctorGeneric", data, err);
+}
+
+clblasCscalFunctorGeneric::~clblasCscalFunctorGeneric()
+{
+  if (this->program) {
+    clReleaseProgram( this->program ) ; 
+  } 
+}
+
+clblasStatus clblasCscalFunctorGeneric::execute(Args & args)
+{
+  size_t nThreads = args.N; // to be tuned according to the device, data and args
+  return xscalExecute<cl_float2>(args.queue, 
+                                 this->program, 
+                                 "Cscal_kernel",
+                                 args.alpha, 
+                                 args.X, 
+                                 args.N, 
+                                 args.offx, 
+                                 args.incx,
+                                 nThreads,
+                                 args.numEventsInWaitList,
+                                 args.eventWaitList,
+                                 args.events);
+}
+
+clblasCscalFunctorGeneric::Cache clblasCscalFunctorGeneric::cache;
+
+clblasCscalFunctorGeneric * clblasCscalFunctorGeneric::provide (clblasCscalFunctor::Args & args)
+{
+  return xscalProvide<clblasCscalFunctorGeneric>(args);
+}
+
+
+// =================================================================================
+//
+// class clblasZscalFunctorGeneric
+//
+// =================================================================================
+
+clblasZscalFunctorGeneric::clblasZscalFunctorGeneric(cl_context ctxt, 
+                                                     cl_device_id dev,
+                                                     const Data & data,
+                                                     cl_int & err) : program(0)
+{
+  this->program = xcalCreateProgram<clblasZscalFunctorGeneric>(ctxt, dev, 'Z', "clblasZscalFunctorGeneric", data, err);
+}
+
+clblasZscalFunctorGeneric::~clblasZscalFunctorGeneric()
+{
+  if (this->program) {
+    clReleaseProgram( this->program ) ; 
+  } 
+}
+
+clblasStatus clblasZscalFunctorGeneric::execute(Args & args)
+{
+  size_t nThreads = args.N; // to be tuned according to the device, data and args
+  return xscalExecute<cl_double2>(args.queue, 
+                                  this->program, 
+                                  "Zscal_kernel",
+                                  args.alpha, 
+                                  args.X, 
+                                  args.N, 
+                                  args.offx, 
+                                  args.incx,
+                                  nThreads,
+                                  args.numEventsInWaitList,
+                                  args.eventWaitList,
+                                  args.events);
+}
+
+clblasZscalFunctorGeneric::Cache clblasZscalFunctorGeneric::cache;
+
+clblasZscalFunctorGeneric * clblasZscalFunctorGeneric::provide (clblasZscalFunctor::Args & args)
+{
+  return xscalProvide<clblasZscalFunctorGeneric>(args);
+}
+
+// =================================================================================
+//
+// class clblasCsscalFunctorGeneric
+//
+// =================================================================================
+
+clblasCsscalFunctorGeneric::clblasCsscalFunctorGeneric(cl_context ctxt, 
+                                                     cl_device_id dev,
+                                                     const Data & data,
+                                                     cl_int & err) : program(0)
+{
+  this->program = xcalCreateProgram<clblasCsscalFunctorGeneric>(ctxt, dev, 'C', "clblasCsscalFunctorGeneric", data, err);
+}
+
+clblasCsscalFunctorGeneric::~clblasCsscalFunctorGeneric()
+{
+  if (this->program) {
+    clReleaseProgram( this->program ) ; 
+  } 
+}
+
+clblasStatus clblasCsscalFunctorGeneric::execute(Args & args)
+{
+  size_t nThreads = args.N; // to be tuned according to the device, data and args
+
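+  // Csscal scales a complex vector by a real alpha: pack alpha into a cl_float2
+  // with a zero imaginary part so the complex Cscal kernel can be reused.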
+  cl_float2 l_alpha;
+  l_alpha.s[0] = args.alpha ;
+  l_alpha.s[1] = 0.f ;
+
+  return xscalExecute<cl_float2>(args.queue, 
+                                 this->program, 
+                                 "Cscal_kernel",
+                                 l_alpha, 
+                                 args.X, 
+                                 args.N, 
+                                 args.offx, 
+                                 args.incx,
+                                 nThreads,
+                                 args.numEventsInWaitList,
+                                 args.eventWaitList,
+                                 args.events);
+}
+
+clblasCsscalFunctorGeneric::Cache clblasCsscalFunctorGeneric::cache;
+
+clblasCsscalFunctorGeneric * clblasCsscalFunctorGeneric::provide (clblasCsscalFunctor::Args & args)
+{
+  return xscalProvide<clblasCsscalFunctorGeneric>(args);
+}
+
+
+// =================================================================================
+//
+// class clblasZdscalFunctorGeneric
+//
+// =================================================================================
+
+clblasZdscalFunctorGeneric::clblasZdscalFunctorGeneric(cl_context ctxt, 
+                                                     cl_device_id dev,
+                                                     const Data & data,
+                                                     cl_int & err) : program(0)
+{
+  this->program = xcalCreateProgram<clblasZdscalFunctorGeneric>(ctxt, dev, 'Z', "clblasZdscalFunctorGeneric", data, err);
+}
+
+clblasZdscalFunctorGeneric::~clblasZdscalFunctorGeneric()
+{
+  if (this->program) {
+    clReleaseProgram( this->program ) ; 
+  } 
+}
+
+clblasStatus clblasZdscalFunctorGeneric::execute(Args & args)
+{
+  size_t nThreads = args.N; // to be tuned according to the device, data and args
+
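+  // Same real-to-complex packing as Csscal, here in double precision so the
+  // Zscal kernel can be reused.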
+  cl_double2 l_alpha;
+  l_alpha.s[0] = args.alpha ;
+  l_alpha.s[1] = 0.0 ;
+
+  return xscalExecute<cl_double2>(args.queue, 
+                                  this->program, 
+                                  "Zscal_kernel",
+                                  l_alpha, 
+                                  args.X, 
+                                  args.N, 
+                                  args.offx, 
+                                  args.incx,
+                                  nThreads,
+                                  args.numEventsInWaitList,
+                                  args.eventWaitList,
+                                  args.events);
+}
+
+clblasZdscalFunctorGeneric::Cache clblasZdscalFunctorGeneric::cache;
+
+clblasZdscalFunctorGeneric * clblasZdscalFunctorGeneric::provide (clblasZdscalFunctor::Args & args)
+{
+  return xscalProvide<clblasZdscalFunctorGeneric>(args);
+}
diff --git a/src/library/blas/functor/functor_xtrsm.cc b/src/library/blas/functor/functor_xtrsm.cc
new file mode 100644
index 0000000..25c1679
--- /dev/null
+++ b/src/library/blas/functor/functor_xtrsm.cc
@@ -0,0 +1,336 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor_xtrsm.h>
+
+//
+// Common part of all XTRSM implementations using the old Solver infrastructure 
+//
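+// doTrsm() validates the matrix arguments, fills a CLBlasKargs descriptor and
+// then builds and executes a TRSM solution sequence via makeSolutionSeq().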
+static clblasStatus
+doTrsm(
+    CLBlasKargs *kargs,
+    clblasOrder order,
+    clblasSide side,
+    clblasUplo uplo,
+    clblasTranspose transA,
+    clblasDiag diag,
+    size_t M,
+    size_t N,
+    const cl_mem A,
+    size_t offA,
+    size_t lda,
+    cl_mem B,
+    size_t offB,
+    size_t ldb,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+    cl_int err;
+    ListHead seq;
+    size_t msize;
+    clblasStatus retCode = clblasSuccess;
+
+    if (!clblasInitialized) {
+        return clblasNotInitialized;
+    }
+
+    /* Validate arguments */
+
+    if ((retCode = checkMemObjects(A, B, NULL, false, A_MAT_ERRSET, B_MAT_ERRSET, END_ERRSET ))) {
+        return retCode;
+    }
+    msize = (side == clblasLeft) ? M : N;
+
+    if ((retCode = checkMatrixSizes(kargs->dtype, order, transA, msize, msize,
+                                    A, offA, lda, A_MAT_ERRSET ))) {
+        return retCode;
+    }
+    if ((retCode = checkMatrixSizes(kargs->dtype, order, clblasNoTrans, M, N,
+                                    B, offB, ldb, B_MAT_ERRSET ))) {
+        return retCode;
+    }
+
+    kargs->order = order;
+    kargs->side = side;
+    kargs->uplo = uplo;
+    kargs->transA = transA;
+    kargs->diag = diag;
+    kargs->M = M;
+    kargs->N = N;
+    kargs->A = A;
+    kargs->offA = offA;
+    kargs->lda.matrix = lda;
+    kargs->B = B;
+    kargs->offBX = offB;
+    kargs->ldb.matrix = ldb;
+    // Store original problem size in K, this is used to know it while
+    // calculating result by parts using M or N as part size
+    if (side == clblasLeft) {
+        kargs->K = M;
+    }
+    else {
+        kargs->K = N;
+    }
+
+    kargs->offsetM = 0;
+    kargs->offsetN = 0;
+    kargs->scimage[0] = 0;
+
+#ifndef TRXM_MULTIPLE_QUEUES
+    if (numCommandQueues != 0) {
+        numCommandQueues = 1;
+    }
+#endif
+
+    listInitHead(&seq);
+    err = makeSolutionSeq(CLBLAS_TRSM, kargs, numCommandQueues, commandQueues,
+        numEventsInWaitList, eventWaitList, events, &seq);
+    if (err == CL_SUCCESS) {
+        err = executeSolutionSeq(&seq);
+    }
+
+    freeSolutionSeq(&seq);
+
+    return (clblasStatus)err;
+}
+
+
+
+// =================================================================================
+//
+// class clblasStrsmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasStrsmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_FLOAT;
+  kargs.alpha.argFloat = args.alpha;
+  
+  return doTrsm(&kargs, 
+                args.order, 
+                args.side, 
+                args.uplo, 
+                args.transA, 
+                args.diag, 
+                args.M, args.N, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+
+}
+
+clblasStrsmFunctorFallback * 
+clblasStrsmFunctorFallback::provide ()
+{
+    static clblasStrsmFunctorFallback strsm_fallback; // The unique instance of clblasStrsmFunctorFallback
+    return & strsm_fallback;
+}
+
+
+void 
+clblasStrsmFunctorFallback::retain()
+{
+  // clblasStrsmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasStrsmFunctorFallback::release()
+{
+  // clblasStrsmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasDtrsmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasDtrsmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_DOUBLE;
+  kargs.alpha.argDouble = args.alpha;
+  
+  return doTrsm(&kargs, 
+                args.order, 
+                args.side, 
+                args.uplo, 
+                args.transA, 
+                args.diag, 
+                args.M, args.N, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+
+}
+
+clblasDtrsmFunctorFallback * 
+clblasDtrsmFunctorFallback::provide ()
+{
+    static clblasDtrsmFunctorFallback dtrsm_fallback; // The unique instance of clblasDtrsmFunctorFallback
+    return & dtrsm_fallback;
+}
+
+
+void 
+clblasDtrsmFunctorFallback::retain()
+{
+  // clblasDtrsmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasDtrsmFunctorFallback::release()
+{
+  // clblasDtrsmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasCtrsmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasCtrsmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_COMPLEX_FLOAT;
+  kargs.alpha.argFloatComplex = args.alpha;
+  
+  return doTrsm(&kargs, 
+                args.order, 
+                args.side, 
+                args.uplo, 
+                args.transA, 
+                args.diag, 
+                args.M, args.N, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+
+}
+
+clblasCtrsmFunctorFallback * 
+clblasCtrsmFunctorFallback::provide ()
+{
+    static clblasCtrsmFunctorFallback ctrsm_fallback; // The unique instance of clblasCtrsmFunctorFallback
+    return & ctrsm_fallback;
+}
+
+
+void 
+clblasCtrsmFunctorFallback::retain()
+{
+  // clblasCtrsmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasCtrsmFunctorFallback::release()
+{
+  // clblasCtrsmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
+
+
+// =================================================================================
+//
+// class clblasZtrsmFunctorFallback
+//
+// =================================================================================
+
+clblasStatus 
+clblasZtrsmFunctorFallback::execute(Args & args)
+{
+  CLBlasKargs kargs;
+
+  memset(&kargs, 0, sizeof(kargs));
+  kargs.dtype = TYPE_COMPLEX_DOUBLE;
+  kargs.alpha.argDoubleComplex = args.alpha;
+  
+  return doTrsm(&kargs, 
+                args.order, 
+                args.side, 
+                args.uplo, 
+                args.transA, 
+                args.diag, 
+                args.M, args.N, 
+                args.A, args.offA, args.lda,
+                args.B, args.offB, args.ldb, 
+                1, &args.queue,
+                args.numEventsInWaitList, 
+                args.eventWaitList, 
+                args.events);
+
+}
+
+clblasZtrsmFunctorFallback * 
+clblasZtrsmFunctorFallback::provide ()
+{
+    static clblasZtrsmFunctorFallback ztrsm_fallback; // The unique instance of clblasZtrsmFunctorFallback
+    return & ztrsm_fallback;
+}
+
+
+void 
+clblasZtrsmFunctorFallback::retain()
+{
+  // clblasZtrsmFunctorFallback has a single global instance 
+  // and shall never be freed 
+}
+
+void 
+clblasZtrsmFunctorFallback::release()
+{
+  // clblasZtrsmFunctorFallback has a single global instance 
+  // and shall never be freed
+}
diff --git a/src/library/blas/functor/gcn_dgemm.cc b/src/library/blas/functor/gcn_dgemm.cc
new file mode 100644
index 0000000..daea87b
--- /dev/null
+++ b/src/library/blas/functor/gcn_dgemm.cc
@@ -0,0 +1,1035 @@
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+#include <gcn_dgemm.h>
+
+#include "BinaryBuild.h"
+
+// For the moment only source code and CL binary kernels are handled
+
+//#if BUILD_KERNEL_FROM_STRING
+//#include "dgemm_hawai.clT"
+//#else 
+//#include "dgemm_hawai.cl_32.bin.clT"
+//#include "dgemm_hawai.cl_64.bin.clT"
+//#endif
+//
+// //cl_uint  _64Bits  = 32;
+//
+////
+//// The name of the 'const char *' providing the kernel OpenCL source
+////
+////  dgemm_TATB_DIVN_DIVM_DIVK_BS0xBS1_NV0xNV1
+////
+//// For instance, DGEMM_SRC_NAME(N,T,32,64,8,8,8,4,8) is dgemm_NT_32_64_8_8x8_4x8
+////
+//#define DGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1)    dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1
+//#define DGEMM_SRC_NAME_TAHITI(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##_##BITS##_bin_Tahiti
+//#define DGEMM_SRC_NAME_HAWAII(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##_##BITS##_bin_Hawaii
+//
+////
+//// The name of the 'const char []' global variable that contain the SPIR data.
+//// That name is similar to the one produced by DGEMM_SRC_NAME but suffixed by _spir
+////
+//#define DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1_spir
+//
+////
+//// The name of the 'const char []' global variable that contain the CL binaries data.
+//// That name is similar to the one produced by DGEMM_SRC_NAME but suffixed by _bin
+////
+//
+//
+//// The name of the kernel itself.
+//// This is basically the name returned by DGEMM_SRC_NAME but as string
+////
+//#define DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) "dgemm_"  #TA #TB "_" #DIVN "_" #DIVM "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1
+//
+////
+//// Helpers to transform N and T in proper clblas values for the macros above
+////
+//#define trans_N clblasNoTrans
+//#define trans_T clblasTrans
+//
+//
+//// Fill a variant descriptor using OpenCL source 
+//#define DGEMM_VARIANT_SRC(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) { \
+//  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) ,     \
+//  DGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) ,        \
+//  NULL, NULL, 0,                                                \
+//  trans_##TA, trans_##TB,                                       \
+//  DIVN,DIVM,DIVK,                                               \
+//{ BS0, BS1 } ,                                                \
+//{ NV0, NV1 }                                                  \
+//} 
+//
+//// Fill a variant descriptor using SPIR  
+//#define DGEMM_VARIANT_SPIR(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) {  \
+//  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) ,       \
+//  NULL , "-x spir -spir-std=1.2"                                  \
+//  DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1),          \
+//  sizeof(DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1)),  \
+//  trans_##TA,trans_##TB,                                          \
+//  DIVN,DIVM,DIVK,                                                 \
+//{ BS0, BS1 } ,                                                  \
+//{ NV0, NV1 }                                                    \
+//} 
+//
+//// Fill a variant descriptor using CL Binaries  
+//#define DGEMM_VARIANT_BIN(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,DEVICE) {  \
+//  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) ,           \
+//  NULL , NULL,                                                        \
+//  DGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS),          \
+//  sizeof(DGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS)),  \
+//  trans_##TA,trans_##TB,                                              \
+//  DIVN,DIVM,DIVK,                                                     \
+//{ BS0, BS1 } ,                                                      \
+//{ NV0, NV1 }                                                        \
+//} 
+
+
+// Set to 1 to enable additional debug printing
+#define VERB 0
+//
+//// Just because the full name is too long
+//typedef clblasDgemmFunctorGCN::Variant Variant ; 
+//
+////
+//// The static cache used to store all instances of clblasDgemmFunctorTahiti 
+////
+//typedef clblasFunctorCache<clblasDgemmFunctorGCN,const Variant *> Cache ;
+//static Cache cache  ;
+//
+//
+//// return true iff a kernel variant is applicable to the specified args
+//static bool applicable( const Variant & var, clblasDgemmFunctor::Args & args ) 
+//{
+//#if 0
+//  // Transpose values are tested in select_variant
+//  if ( args.transA != var.transA ) return false ;
+//  if ( args.transB != var.transB ) return false ;
+//#endif
+//  if ( args.N % var.divN != 0 ) return false ; 
+//  if ( args.M % var.divM != 0 ) return false ; 
+//  if ( args.K % var.divK != 0 ) return false ; 
+//  return true ;
+//}
+//
+////
+//// The goal of this function is to return the Variant to be used 
+//// for the DGEMM specified by 'args'. 
+////
+//// The variants are typically tested sequentially from the more 
+//// specific to the more generic. Additional conditions can be 
+//// placed into the surrounding 'if' (typically that would be 
+//// to perform additional tests on M, N and K).
+//// 
+////
+//static const Variant * select_variant( clblasDgemmFunctor::Args & args, const char* DevName, cl_uint _64BitsUse )
+//{
+//
+//
+//  if ( args.transA == clblasNoTrans ) 
+//  {
+//    if ( args.transB == clblasNoTrans ) 
+//    {
+//
+//      // ===== dgemm NN ======
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,N,48,48,8,8,8,6,6) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//
+//#endif
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,N,32,32,8,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,N,32,32,1,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,N,1,1,8,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ;
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//         
+//      } 
+//
+//      // The generic version shall be last
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,N,1,1,1,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      }        
+//    } 
+//    else  
+//    {
+//      // ===== dgemm NT ======
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,T,48,48,8,8,8,6,6) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ;
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//         
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,T,32,32,8,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ;
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//         
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,T,32,32,1,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ;
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//         
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,T,1,1,8,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      } 
+//
+//      // The generic version shall be last
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(N,T,1,1,1,8,8,4,4) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      } 
+//
+//
+//    }
+//  }
+//  else 
+//  {
+//    if ( args.transB == clblasNoTrans ) 
+//    {
+//      // ===== dgemm TN ======
+//
+//      if ( args.M >= 2000 && args.N >= 2000 ) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(T,N,48,48,16,8,8,6,6) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ;
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//         
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(T,N,48,48,8,8,8,6,6) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ;
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//         
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(T,N,32,32,16,8,16,4,2) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ;
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//         
+//      } 
+//
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(T,N,32,32,1,8,16,4,2) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      } 
+//
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(T,N,1,1,16,8,16,4,2) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      } 
+//
+//      // The generic version shall be last
+//      if (true) 
+//      {
+//#if BUILD_KERNEL_FROM_STRING
+//        static const Variant variant = DGEMM_VARIANT_SRC(T,N,1,1,1,8,16,4,2) ;
+//        if ( applicable(variant,args) ) 
+//          return &variant ; 
+//#else 
+//        if(!strcmp(DevName, "Tahiti"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,64,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,32,TAHITI) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//        else if(!strcmp(DevName, "Hawaii"))
+//        {
+//          if(_64BitsUse==64)
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,64,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//
+//          else
+//          {
+//            static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,32,HAWAII) ;
+//            if ( applicable(variant,args) ) 
+//              return &variant ; 
+//          }
+//        }
+//#endif
+//        
+//      } 
+//
+//
+//    }
+//    else
+//    {
+//      // ===== dgemm TT ======
+//
+//      // TODO 
+//    }
+//  }
+//
+//
+//  return NULL ; // No suitable variant ... will use the fallback
+//
+//}  
+
+//clblasDgemmFunctorGCN::clblasDgemmFunctorGCN(Args & args, const Variant * variant, cl_int & err)  :
+//  m_program(0) , m_variant(variant)
+//{
+//
+//  cl_device_id device;
+//  cl_context context;
+//
+//  cl_command_queue queue = args.queue;
+//  err = getDeviceAndContext(queue, device, context);
+//  if( err != CL_SUCCESS )
+//  {
+//    return;
+//  }
+//
+//  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variant->kernel_name) ;
+//
+//  //Ben do I use the correct "kernel_name"?
+//  BinaryLookup bl(context, device, "clblasDgemmFunctorGCN");
+//
+//  bl.variantRaw( this->m_variant->kernel_name, strlen(this->m_variant->kernel_name)+1 ) ;
+//
+//  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+//  {
+//    if ( this->m_variant->bin != 0 ) 
+//    {
+//      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+//      err = bl.buildFromBinary(this->m_variant->bin, this->m_variant->bin_size, this->m_variant->build_options);
+//    }
+//    else
+//    {
+//      // directly build from a char* 
+//      err = bl.buildFromSource(this->m_variant->source);
+//    } 
+//
+//    if ( err != CL_SUCCESS )
+//    {  
+//      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+//
+//      return;
+//    }
+//  }
+//
+//  this->m_program = bl.getProgram();
+//}
+
+clblasStatus clblasDgemmFunctorGCN::execute(Args &args) 
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s\n", this->m_variant->kernel_name) ;
+
+  cl_kernel kernel = clCreateKernel( this->m_program, this->m_variant->kernel_name,  &err);
+  if (err != CL_SUCCESS) return clblasStatus(err) ; 
+
+  if (VERB) printf(" ===> FOUND %s\n", this->m_variant->kernel_name) ;
+
+  int M   = args.M, N = args.N, K = args.K;
+  int lda = args.lda, ldb = args.ldb, ldc = args.ldc;
+
+  int offsetA = args.offA;
+  int offsetB = args.offB;
+  int offsetC = args.offC;
+
+  int arg=0 ; 
+
+  // All dgemm kernels shall have the same arguments: (A,B,C,M,N,K,alpha,beta,lda,ldb,ldc,offa,offb,offc) 
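+  // For illustration only, the kernel signature is assumed to follow the same
+  // order (the __ALPHA variants omit the beta parameter):
+  //   dgemm_*(__global double *A, __global double *B, __global double *C,
+  //           int M, int N, int K, double alpha, [double beta,]
+  //           int lda, int ldb, int ldc, int offa, int offb, int offc)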
+
+  setKernelArg<cl_mem>(kernel, arg++, args.A);
+  setKernelArg<cl_mem>(kernel, arg++, args.B);
+  setKernelArg<cl_mem>(kernel, arg++, args.C);
+
+  setKernelArg<int>(kernel, arg++, M);
+  setKernelArg<int>(kernel, arg++, N);
+  setKernelArg<int>(kernel, arg++, K);
+
+  setKernelArg<cl_double>(kernel, arg++, args.alpha);
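+  // __ALPHA variants are specialized for beta == 0 and take no beta argument.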
+  if (args.beta!=0 && this->m_variant->mult.compare("__ALPHA")!=0)
+    setKernelArg<cl_double>(kernel, arg++, args.beta);
+
+  setKernelArg<int>(kernel, arg++, lda);
+  setKernelArg<int>(kernel, arg++, ldb);
+  setKernelArg<int>(kernel, arg++, ldc);
+
+  setKernelArg<int>(kernel, arg++, offsetA);
+  setKernelArg<int>(kernel, arg++, offsetB);
+  setKernelArg<int>(kernel, arg++, offsetC);
+
+  const size_t * ls  = this->m_variant->ls  ; // Each work group is made of ls[0] x ls[1]  PE
+  const size_t * bwi = this->m_variant->bwi ; // Each PE updates bwi[0] x bwi[1] values
+
+  size_t globalThreads[2];
+
+  unsigned int thx, thy;
+
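+  // Each dimension needs ceil(M / bwi[0]) (resp. ceil(N / bwi[1])) work-items,
+  // rounded up to a multiple of the corresponding work-group size.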
+  thx   = M/bwi[0] + ((M%bwi[0] != 0) ? 1 : 0);   
+  thx   = thx/ls[0] + ((thx%ls[0] != 0) ? 1 : 0); 
+  thx   = ls[0] * thx;
+
+  thy   = N/bwi[1] + ((N%bwi[1] != 0) ? 1 : 0);  
+  thy   = thy/ls[1] + ((thy%ls[1] != 0) ? 1 : 0); 
+  thy   = ls[1] * thy;
+
+  globalThreads[0] = thx;
+  globalThreads[1] = thy;
+
+  err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL,
+    globalThreads, NULL , 
+    args.numEventsInWaitList, 
+    args.eventWaitList, 
+    args.events);
+
+  clReleaseKernel(kernel) ;
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  return clblasStatus(err) ;
+}
+
+
+//clblasDgemmFunctorGCN * 
+//  clblasDgemmFunctorGCN::provide(clblasDgemmFunctor::Args & args, const char* DevName) 
+//{
+//
+//  if ( args.order == clblasRowMajor ) 
+//    return NULL ;   // The RowMajor case shall never occur. 
+//
+//  cl_device_id dev;
+//  cl_context   ctxt;
+//
+//  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+//  if (err != CL_SUCCESS)
+//  {
+//    return NULL;
+//  }
+//  cl_uint bitness = getAddressBits(dev);
+//
+//  const Variant * variant = select_variant( args, DevName, bitness ) ;
+//  if ( variant == NULL )  
+//    return NULL ; 
+//
+//
+//
+//
+//  Cache::Lookup lookup(cache, ctxt, dev, variant) ;
+//
+//  if ( lookup.ok() ){
+//    clblasDgemmFunctorGCN * functor = lookup.get();
+//    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+//    return functor;
+//  }
+//
+//  clblasDgemmFunctorGCN * functor = new clblasDgemmFunctorGCN(args, variant, err);
+//  if (err != CL_SUCCESS)
+//  {
+//    return NULL;
+//  }
+//
+//  lookup.set(functor) ;
+//
+//  return functor;
+//
+//}
+
+
+
diff --git a/src/library/blas/functor/gcn_dgemmCommon.cc b/src/library/blas/functor/gcn_dgemmCommon.cc
new file mode 100644
index 0000000..e3c59ad
--- /dev/null
+++ b/src/library/blas/functor/gcn_dgemmCommon.cc
@@ -0,0 +1,997 @@
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h> 
+
+#include "BinaryBuild.h"
+#include "gcn_dgemmCommon.h"
+
+
+
+#if BUILD_KERNEL_FROM_STRING
+#include "dgemm_hawai.clT"
+#else 
+
+#include "dgemm_hawai.clHawaii_64.bin.clT"
+
+
+#include "dgemm_hawai.clTahiti_64.bin.clT"
+#endif
+
+//cl_uint  _64Bits  = 32;
+//dgemm_NT_48_48_8_8x8_6x6_ALPHA_32_bin_Tahiti
+//
+// The name of the 'const char *' providing the kernel OpenCL source
+//
+//  dgemm_TATB_DIVN_DIVM_DIVK_BS0xBS1_NV0xNV1MULT
+//
+// For instance, DGEMM_SRC_NAME(N,T,32,64,8,8,8,4,8,__ALPHABETA) is dgemm_NT_32_64_8_8x8_4x8__ALPHABETA
+//
+#define DGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)    dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT
+#define DGEMM_SRC_NAME_TAHITI(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Tahiti
+#define DGEMM_SRC_NAME_HAWAII(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Hawaii
+
+//
+// The name of the 'const char []' global variable that contain the SPIR data.
+// That name is similar to the one produced by DGEMM_SRC_NAME but suffixed by _spir
+//
+#define DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##_spir
+
+//
+// The name of the 'const char []' global variable that contain the CL binaries data.
+// That name is similar to the one produced by DGEMM_SRC_NAME but suffixed by _bin
+//
+
+
+// The name of the kernel itself.
+// This is basically the name returned by DGEMM_SRC_NAME but as string
+//
+#define DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) "dgemm_"  #TA #TB "_" #DIVN "_" #DIVM "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT 
+
+//
+// Helpers to transform N and T in proper clblas values for the macros above
+//
+#define trans_N clblasNoTrans
+#define trans_T clblasTrans
+
+
+// Fill a variant descriptor using OpenCL source 
+#define DGEMM_VARIANT_SRC(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) { \
+  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,     \
+  DGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,        \
+  NULL, NULL, 0,                                                \
+  trans_##TA, trans_##TB,                                       \
+  DIVN,DIVM,DIVK,                                               \
+{ BS0, BS1 } ,                                                \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+// Fill a variant descriptor using SPIR  
+#define DGEMM_VARIANT_SPIR(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) {  \
+  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,       \
+  NULL , "-x spir -spir-std=1.2"                                  \
+  DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1),          \
+  sizeof(DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1)),  \
+  trans_##TA,trans_##TB,                                          \
+  DIVN,DIVM,DIVK,                                                 \
+{ BS0, BS1 } ,                                                  \
+{ NV0, NV1 }  ,                                                      \
+#MULT                                                               \
+} 
+
+// Fill a variant descriptor using CL Binaries  
+#define DGEMM_VARIANT_BIN(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,DEVICE, MULT) {  \
+  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,           \
+  NULL , NULL,                                                        \
+  DGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT),          \
+  sizeof(DGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT)),  \
+  trans_##TA,trans_##TB,                                              \
+  DIVN,DIVM,DIVK,                                                     \
+{ BS0, BS1 } ,                                                      \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+
+// Set to 1 to enable additional debug printing
+#define VERB 0
+
+// Just because the full name is too long
+typedef clblasDgemmFunctorGCN::Variant Variant ; 
+
+//
+// The static cache used to store all instances of clBlasGCNdgemmCommonFunctor
+//
+typedef clblasFunctorCache<clBlasGCNdgemmCommonFunctor,const Variant *> Cache ;
+static Cache cache  ;
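+// Entries are looked up per (context, device, selected variant).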
+
+
+// return true iff a kernel variant is applicable to the specified args
+static bool applicable( const Variant & var, clblasDgemmFunctor::Args & args ) 
+{
+#if 0
+  // Transpose values are tested in select_variant
+  if ( args.transA != var.transA ) return false ;
+  if ( args.transB != var.transB ) return false ;
+#endif
+  if ( args.N % var.divN != 0 ) 
+    return false ; 
+  if ( args.M % var.divM != 0 ) 
+    return false ; 
+  if ( args.K % var.divK != 0 ) 
+    return false ;
+  if ( args.beta==0 && var.mult.compare("__ALPHA")!=0)
+    return false ;
+  return true ;
+}
+
+//
+// The goal of this function is to return the Variant to be used 
+// for the DGEMM specified by 'args'. 
+//
+// The variants are tested sequentially, from the most specific to the
+// most generic. Additional conditions can be placed in the surrounding
+// 'if' (typically to perform additional tests on M, N and K).
+// 
+//
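+// For example, with transA = N, transB = N, M = N = K = 1024 and beta != 0 on a
+// 64-bit Hawaii device (offline binaries compiled in, i.e.
+// CLBLAS_HAWAII_DYNAMIC_KERNEL not defined), the 48x48x8 variants are skipped
+// because 1024 % 48 != 0, and the 32x32x8 __ALPHABETA variant is returned
+// since 1024 is divisible by both 32 and 8.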
+static const Variant * select_variant( clblasDgemmFunctor::Args & args, const char* DevName, cl_uint _64BitsUse )
+{
+  if(_64BitsUse!=64)
+  {
+    std::cerr << "clBLAS is not supported on 32-bit platforms" << std::endl;
+    assert(_64BitsUse == 64);
+    return NULL;
+  }
+
+  if ( args.transA == clblasNoTrans ) 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+
+      // ===== dgemm NN ======
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,48,48,8,8,8,6,6,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,48,48,8,8,8,6,6,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,64,TAHITI,__ALPHABETA ) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          //const char * test = DGEMM_KERNEL_NAME(N,N,48,48,8,8,8,6,6, __ALPHA);
+          // test
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,64,TAHITI,__ALPHA ) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,48,48,8,8,8,6,6,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,32,32,8,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,32,32,8,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,32,32,8,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,40,40,8,8,8,5,5,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,40,40,8,8,8,5,5,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,40,40,8,8,8,5,5,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,40,40,8,8,8,5,5,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,40,40,8,8,8,5,5,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,40,40,8,8,8,5,5,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        } 
+#endif
+
+      }
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,32,32,1,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,32,32,1,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,32,32,1,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif       
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,1,1,8,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,1,1,8,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,1,1,8,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      // The generic version shall be last
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,1,1,1,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,1,1,1,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,N,1,1,1,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ;
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      }        
+    } 
+    else  
+    {
+      // ===== dgemm NT ======
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,48,48,8,8,8,6,6,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,48,48,8,8,8,6,6, __ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,48,48,8,8,8,6,6,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,32,32,8,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,32,32,8,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,32,32,8,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,40,40,8,8,8,5,5,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,40,40,8,8,8,5,5,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,40,40,8,8,8,5,5,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,40,40,8,8,8,5,5,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,40,40,8,8,8,5,5,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,40,40,8,8,8,5,5,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        } 
+#endif
+
+      }
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,32,32,1,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,32,32,1,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,64,TAHITI, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,32,32,1,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,1,1,8,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,1,1,8,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,1,1,8,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      // The generic version shall be last
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,1,1,1,8,8,4,4,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,1,1,1,8,8,4,4,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(N,T,1,1,1,8,8,4,4,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+
+
+        }
+#endif
+
+      } 
+    }
+  }
+  else 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+      // ===== dgemm TN ======
+
+      if ( args.M >= 2000 && args.N >= 2000 ) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(T,N,48,48,16,8,8,6,6,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(T,N,48,48,16,8,8,6,6,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ;
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,48,48,16,8,8,6,6,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ;
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(T,N,48,48,8,8,8,6,6,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(T,N,48,48,8,8,8,6,6,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,48,48,8,8,8,6,6,64,HAWAII, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(T,N,32,32,16,8,16,4,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ;
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(T,N,32,32,16,8,16,4,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,32,32,16,8,16,4,2,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ;
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif       
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(T,N,32,32,1,8,16,4,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(T,N,32,32,1,8,16,4,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ;
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,32,32,1,8,16,4,2,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ;
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+#endif
+
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(T,N,1,1,16,8,16,4,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(T,N,1,1,16,8,16,4,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ;
+#endif
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,1,1,16,8,16,4,2,64,HAWAII, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+#endif
+
+      } 
+
+      // The generic version shall be last
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(T,N,1,1,1,8,16,4,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(T,N,1,1,1,8,16,4,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ;
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,64,TAHITI,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,64,TAHITI,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,64,HAWAII,__ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = DGEMM_VARIANT_BIN(T,N,1,1,1,8,16,4,2,64,HAWAII,__ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+#endif
+
+      } 
+
+
+    }
+    else
+    {
+      // ===== dgemm TT ======
+
+      // TODO 
+    }
+  }
+
+
+  return NULL ; // No suitable variant ... will use the fallback
+
+}  
+
+
+
+clBlasGCNdgemmCommonFunctor::clBlasGCNdgemmCommonFunctor(Args & args, const Variant * variant, cl_int & err)
+
+{
+
+  cl_device_id device;
+  cl_context context;
+
+
+  m_program = NULL;
+  m_variant = variant;
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variant->kernel_name) ;
+
+  // Use the variant's kernel name as the raw key for the binary cache lookup.
+  BinaryLookup bl(context, device, "clBlasGCNdgemmCommonFunctor");
+
+  bl.variantRaw( this->m_variant->kernel_name, strlen(this->m_variant->kernel_name)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variant->bin != 0 ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      err = bl.buildFromBinary(this->m_variant->bin, this->m_variant->bin_size, this->m_variant->build_options);
+    }
+    else
+    {
+      // directly build from a char* 
+      err = bl.buildFromSource(this->m_variant->source);
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
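+
+// Note: BinaryLookup is the front end of the on-disk binary cache (see
+// doc/README-BinaryCacheOnDisk.txt). Presumably, found() returns a program
+// previously cached for this device/variant signature, and otherwise the
+// program built above is written back so that later runs can skip the
+// compilation step.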
+
+
+
+clBlasGCNdgemmCommonFunctor * 
+  clBlasGCNdgemmCommonFunctor::provide(clblasDgemmFunctor::Args & args, const char* DevName) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+
+  const Variant * variant = select_variant( args, DevName, bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+
+  Cache::Lookup lookup(cache, ctxt, dev, variant) ;
+
+  if ( lookup.ok() ){
+    clBlasGCNdgemmCommonFunctor * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+
+  clBlasGCNdgemmCommonFunctor * functor = new clBlasGCNdgemmCommonFunctor(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+
+}
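+
+// Hypothetical usage sketch (illustration only, assuming the usual
+// execute()/release() interface of the functor base class; the actual call
+// site is presumably the functor selector):
+//
+//   clblasDgemmFunctor::Args args(...); // filled in by the xGEMM entry point
+//   clBlasGCNdgemmCommonFunctor * f =
+//       clBlasGCNdgemmCommonFunctor::provide(args, "Hawaii");
+//   if (f != NULL) {        // NULL means no suitable variant: use the fallback
+//     f->execute(args);
+//     f->release();         // balances the retain()/new done by provide()
+//   }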
diff --git a/src/library/blas/functor/gcn_dgemmSmallMatrices.cc b/src/library/blas/functor/gcn_dgemmSmallMatrices.cc
new file mode 100644
index 0000000..c0f753e
--- /dev/null
+++ b/src/library/blas/functor/gcn_dgemmSmallMatrices.cc
@@ -0,0 +1,654 @@
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+
+#include "BinaryBuild.h"
+#include "gcn_dgemmSmallMatrices.h"
+
+
+#if BUILD_KERNEL_FROM_STRING
+#include "dgemm_gcn_SmallMatrices.clT"
+#else 
+
+#include "dgemm_gcn_SmallMatrices.clHawaii_64.bin.clT"
+
+#include "dgemm_gcn_SmallMatrices.clTahiti_64.bin.clT"
+
+#endif
+
+
+#define DGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)    dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT
+#define DGEMM_SRC_NAME_TAHITI(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Tahiti
+#define DGEMM_SRC_NAME_HAWAII(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Hawaii
+
+//
+// The name of the 'const char []' global variable that contains the SPIR data.
+// That name is similar to the one produced by DGEMM_SRC_NAME but suffixed with _spir
+//
+#define DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)   dgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1_spir
+
+//
+// The name of the 'const char []' global variable that contains the CL binary data.
+// That name is similar to the one produced by DGEMM_SRC_NAME but suffixed with _bin
+//
+
+
+// The name of the kernel itself.
+// This is basically the name returned by DGEMM_SRC_NAME but as a string
+//
+#define DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) "dgemm_"  #TA #TB "_" #DIVN "_" #DIVM "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT 
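+//
+// For example, DGEMM_KERNEL_NAME(N,N,16,16,8,8,8,2,2,__ALPHABETA) expands to
+// the string "dgemm_NN_16_16_8_8x8_2x2__ALPHABETA", matching the symbol
+// produced by DGEMM_SRC_NAME for the same parameters.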
+
+//
+// Helpers to transform N and T in proper clblas values for the macros above
+//
+#define trans_N clblasNoTrans
+#define trans_T clblasTrans
+
+
+// Fill a variant descriptor using OpenCL source 
+#define DGEMM_VARIANT_SRC(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) { \
+  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,     \
+  DGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,        \
+  NULL, NULL, 0,                                                \
+  trans_##TA, trans_##TB,                                       \
+  DIVN,DIVM,DIVK,                                               \
+{ BS0, BS1 } ,                                                \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+// Fill a variant descriptor using SPIR  
+#define DGEMM_VARIANT_SPIR(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) {  \
+  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,       \
+  NULL , "-x spir -spir-std=1.2"                                  \
+  DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1),          \
+  sizeof(DGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1)),  \
+  trans_##TA,trans_##TB,                                          \
+  DIVN,DIVM,DIVK,                                                 \
+{ BS0, BS1 } ,                                                  \
+{ NV0, NV1 }  ,                                                      \
+#MULT                                                               \
+} 
+
+// Fill a variant descriptor using CL Binaries  
+#define DGEMM_VARIANT_BIN(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,DEVICE, MULT) {  \
+  DGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,           \
+  NULL , NULL,                                                        \
+  DGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT),          \
+  sizeof(DGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT)),  \
+  trans_##TA,trans_##TB,                                              \
+  DIVN,DIVM,DIVK,                                                     \
+{ BS0, BS1 } ,                                                      \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
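+
+// Reading guide for the initializers above (inferred from how the fields are
+// consumed elsewhere in this file): kernel name, OpenCL source (or NULL),
+// build options (or NULL), pre-built binary (or NULL), binary size, transA,
+// transB, the N/M/K divisors checked by applicable(), the work-group size
+// ls[2] = {BS0,BS1}, the per-work-item block bwi[2] = {NV0,NV1}, and the
+// "__ALPHA"/"__ALPHABETA" multiplier tag.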
+
+
+// Just because the full name is too long
+typedef clBlasGCNDgemmSmallMatricesFunctor::Variant Variant ; 
+
+////define the string name of the source/binary code
+//#define DGEMM_SRC_NAME(TA,TB,MULT)    dgemm_##TA##TB##_SMALL##MULT
+//#define DGEMM_SRC_NAME_HAWAII(TA,TB,  MULT, BITS)   dgemm_##TA##TB##_SMALL##MULT##_##BITS##_bin_Hawaii
+//
+////variant name used to differentiate the different ones
+//#define DGEMM_VARIANT_NAME(TA,TB, MULT) "dgemm_" #TA #TB "_SMALL" #MULT
+////DGEMM_VARIANT_NAME(TA, TB, DIVM , DIVN, DIVK, GREATER48M, GREATER48N, NBKERNEL),    
+//
+//#define DGEMM_KERNEL_NAME(TA,TB,DIVM,DIVN,DIVK,BS0,BS1,NV0,NV1,MULT, BLOC) "dgemm_"  #TA #TB "_" #DIVM "_" #DIVN "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT "_SPLIT_" #BLOC
+//
+//
+//#define trans_N clblasNoTrans
+//#define trans_T clblasTrans
+//
+//// Fill a variant descriptor using OpenCL source 
+//#define DGEMM_VARIANT_OBJ(TA,TB,DIVK,BS0,BS1,NV0,NV1, BITS, MULT,  \
+//  KERNEL_NAME_MAIN, KERNEL_NAME_ROW, \
+//  KERNELS_SRC,  \
+//  KERNEL_BUILD_OPTIONS,  \
+//  KERNELS_BIN,  \
+//  KERNEL_BIN_SIZE) { \
+//  DGEMM_VARIANT_NAME(TA,TB, MULT),                                          \
+//{ KERNEL_NAME_MAIN, KERNEL_NAME_ROW } , \
+//  KERNELS_SRC,  \
+//  KERNEL_BUILD_OPTIONS, \
+//  KERNELS_BIN, \
+//  KERNEL_BIN_SIZE, \
+//  trans_##TA, trans_##TB,                                       \
+//  DIVK ,                                                        \
+//{ BS0, BS1 } ,                                                \
+//{ NV0, NV1 } ,                                                      \
+//#MULT                                                               \
+//} 
+
+typedef clblasFunctorCache<clBlasGCNDgemmSmallMatricesFunctor,const Variant *> CacheSMall ;
+static CacheSMall cachesmall  ;
+
+// Set to 1 to enable additional debug prints.
+#define VERB 0
+
+
+
+static bool applicable( const Variant & var, clblasDgemmFunctor::Args & args ) 
+{
+#if 0
+  // Transpose values are tested in select_variant
+  if ( args.transA != var.transA ) return false ;
+  if ( args.transB != var.transB ) return false ;
+#endif
+
+  //if (args.N>=var.divN && args.N % var.divN != 0 )
+  if ( args.N % var.divN != 0 ) 
+    return false ; 
+  if ( args.M % var.divM != 0 ) 
+    return false ; 
+  if ( args.beta==0 && var.mult.compare("__ALPHA")!=0)
+    return false ;
+  return true ;
+}
+
+
+
+
+static const Variant * select_variant_GCNSmallMatrices( clblasDgemmFunctor::Args & args, const char* DevName, cl_uint _64BitsUse )
+{
+
+  if(_64BitsUse!=64)
+  {
+    std::cout<<"clBLAS is not supported on 32-bit platforms"<< std::endl;
+    return NULL;
+  }
+
+  if ( args.transA == clblasNoTrans ) 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,16,16,8,8,8,2,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,16,16,8,8,8,2,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,N,16,16,8,8,8,2,2,64,TAHITI,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,N,16,16,8,8,8,2,2,64,TAHITI,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,N,16,16,8,8,8,2,2,64,HAWAII,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,N,16,16,8,8,8,2,2,64,HAWAII,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+#endif
+      } 
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,N,24,24,8,8,8,3,3,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,N,24,24,8,8,8,3,3,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,N,24,24,8,8,8,3,3,64,TAHITI,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,N,24,24,8,8,8,3,3,64,TAHITI,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,N,24,24,8,8,8,3,3,64,HAWAII,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,N,24,24,8,8,8,3,3,64,HAWAII,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+#endif
+      } 
+      
+
+    }
+    if (args.transB == clblasTrans)
+    {
+            if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,16,16,8,8,8,2,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,16,16,8,8,8,2,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,T,16,16,8,8,8,2,2,64,TAHITI,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,T,16,16,8,8,8,2,2,64,TAHITI,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,T,16,16,8,8,8,2,2,64,HAWAII,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,T,16,16,8,8,8,2,2,64,HAWAII,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+#endif
+      } 
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = DGEMM_VARIANT_SRC(N,T,24,24,8,8,8,3,3,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = DGEMM_VARIANT_SRC(N,T,24,24,8,8,8,3,3,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,T,24,24,8,8,8,3,3,64,TAHITI,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,T,24,24,8,8,8,3,3,64,TAHITI,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ;
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+            static const Variant variant = DGEMM_VARIANT_BIN(N,T,24,24,8,8,8,3,3,64,HAWAII,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = DGEMM_VARIANT_BIN(N,T,24,24,8,8,8,3,3,64,HAWAII,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+#endif
+      } 
+    }
+
+  }
+
+  return NULL;
+}
+
+clBlasGCNDgemmSmallMatricesFunctor::clBlasGCNDgemmSmallMatricesFunctor(Args & args, const Variant * variant, cl_int & err) 
+{
+
+  cl_device_id device;
+  cl_context context;
+  m_program=NULL;
+  m_variant = variant;
+
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variant->kernel_name) ;
+
+  // Use the variant's kernel name as the raw key for the binary cache lookup.
+  BinaryLookup bl(context, device, "clBlasGCNDgemmSmallMatricesFunctor");
+
+  bl.variantRaw( this->m_variant->kernel_name, strlen(this->m_variant->kernel_name)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variant->bin != NULL ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      // only one binary containing all the kernels
+      err = bl.buildFromBinary(this->m_variant->bin, this->m_variant->bin_size, this->m_variant->build_options);
+    }
+    else
+    {
+      // directly build from a char* 
+      err = bl.buildFromSource(this->m_variant->source);
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+
+
+clBlasGCNDgemmSmallMatricesFunctor * 
+  clBlasGCNDgemmSmallMatricesFunctor::provide(clblasDgemmFunctor::Args & args, const char* DevName) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+
+  const Variant * variant = select_variant_GCNSmallMatrices( args, DevName,  bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+  CacheSMall::Lookup lookup(cachesmall, ctxt, dev, variant) ;
+
+
+  if ( lookup.ok() )
+  {
+    clBlasGCNDgemmSmallMatricesFunctor * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+
+  clBlasGCNDgemmSmallMatricesFunctor * functor = new clBlasGCNDgemmSmallMatricesFunctor(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+
+}
+
+
+//cl_int clBlasGCNDgemmSmallMatricesFunctor::KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[4], Args &args)
+//{
+//  size_t GlobalX =args.M/m_variantSplit->bwi[0];
+//  GlobalX-=GlobalX%m_variantSplit->ls[0];
+//  //
+//
+//  size_t GlobalY = args.N/m_variantSplit->bwi[1];
+//  GlobalY-=GlobalY%m_variantSplit->ls[1];
+//
+//
+//  std::size_t gs[2] = {GlobalX, GlobalY};
+//  cl_int error = 0;
+//
+//  if (args.M%48==0 && args.N%48==0)
+//  {
+//    if (VERB) printf(" ===> EXECUTE KERNEL 0 \n") ;
+//    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,args.events);
+//    return error;
+//  }
+//
+//  if (args.M%48!=0 && args.N%48!=0 && args.M>=48 && args.N>=48 )
+//  {
+//    if (VERB) printf(" ===> EXECUTE KERNEL 0, 1, 2, 3 \n") ;
+//    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+//
+//    gs[0] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+//
+//    gs[1] = 8;
+//    gs[0] = GlobalX;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+//
+//    gs[0] = 8; gs[1] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+//    return error;
+//  }
+//  if (args.M%48==0 && args.N%48!=0 &&  args.N>48 )
+//  {
+//    if (VERB) printf(" ===> EXECUTE KERNEL 0, 2, \n") ;
+//
+//    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+//    gs[1] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+//
+//    return error;
+//  }
+//  if (args.N%48==0 && args.M%48!=0 &&  args.M>48 )
+//  {
+//        if (VERB) printf(" ===> EXECUTE KERNEL 0, 1 \n") ;
+//
+//    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+//    gs[0] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+//
+//    return error;
+//  }
+//  if(args.M<48 && args.N%48==0)
+//  {
+//        if (VERB) printf(" ===> EXECUTE KERNEL 1, \n") ;
+//
+//    gs[0] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, args.events);
+//    return error;
+//  }
+//  if(args.M<48 && args.N%48!=0 && args.N>=48)
+//  {
+//        if (VERB) printf(" ===> EXECUTE KERNEL  1, 3 \n") ;
+//
+//    gs[0] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, NULL);
+//    gs[1] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+//    return error;
+//  }
+//  if(args.N<48 && args.M%48==0)
+//  {
+//        if (VERB) printf(" ===> EXECUTE KERNEL  2 \n") ;
+//
+//    gs[1] = 8;  
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, args.events);
+//    return error;
+//  }
+//  if(args.N<48 && args.M%48!=0&& args.M>=48)
+//  {
+//        if (VERB) printf(" ===> EXECUTE KERNEL 2, 3 \n") ;
+//
+//    gs[1] = 8;  
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, NULL);
+//
+//    gs[0] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+//    return error;
+//  }
+//  if (args.N<48 && args.M<48)
+//  {
+//     if (VERB) printf(" ===> EXECUTE KERNEL  3 \n") ;
+//    gs[0] = 8; gs[1] = 8;
+//    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls,args.numEventsInWaitList, args.eventWaitList, args.events);
+//    return error;
+//  }
+//
+//  return clblasNotImplemented;
+//}
+
+
+
+// TODO: revisit this execute() implementation.
+clblasStatus clBlasGCNDgemmSmallMatricesFunctor::execute(Args &args)
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s, alpha =%f ,beta = %f\n", this->m_variant->kernel_name, args.alpha, args.beta) ;
+
+  cl_kernel kernel; 
+
+  kernel = clCreateKernel( this->m_program, this->m_variant->kernel_name, &err);
+  if (err != CL_SUCCESS) return clblasStatus(err) ; 
+
+  if (VERB) printf(" ===> FOUND %s\n", this->m_variant->kernel_name) ;
+
+  int M   = args.M, N = args.N, K = args.K;
+  int lda = args.lda, ldb = args.ldb, ldc = args.ldc;
+
+  int offsetA = args.offA;
+  int offsetB = args.offB;
+  int offsetC = args.offC;
+
+  int arg=0 ; 
+
+  // All dgemm kernels take the arguments (A,B,C,M,N,K,alpha[,beta],lda,ldb,ldc,offa,offb,offc); beta is omitted for the __ALPHA variants.
+
+  setKernelArg<cl_mem>(kernel, arg++, args.A);
+  setKernelArg<cl_mem>(kernel, arg++, args.B);
+  setKernelArg<cl_mem>(kernel, arg++, args.C);
+
+  setKernelArg<int>(kernel, arg++, M);
+  setKernelArg<int>(kernel, arg++, N);
+  setKernelArg<int>(kernel, arg++, K);
+
+  setKernelArg<cl_double>(kernel, arg++, args.alpha);
+  if (args.beta!=0 && this->m_variant->mult.compare("__ALPHA")!=0)
+    setKernelArg<cl_double>(kernel, arg++, args.beta);
+
+  setKernelArg<int>(kernel, arg++, lda);
+  setKernelArg<int>(kernel, arg++, ldb);
+  setKernelArg<int>(kernel, arg++, ldc);
+
+  setKernelArg<int>(kernel, arg++, offsetA);
+  setKernelArg<int>(kernel, arg++, offsetB);
+  setKernelArg<int>(kernel, arg++, offsetC);
+
+
+ // err = KernelsLaunch(queue, kernel, args);
+  const size_t * ls  = this->m_variant->ls  ; // Each work group is made of ls[0] x ls[1]  PE
+  const size_t * bwi = this->m_variant->bwi ; // Each PE updates bwi[0] x bwi[1] values
+
+  size_t globalThreads[2];
+
+  unsigned int thx, thy;
+
+  thx   = M/bwi[0] + ((M%bwi[0] != 0) ? 1 : 0);   
+  thx   = thx/ls[0] + ((thx%ls[0] != 0) ? 1 : 0); 
+  thx   = ls[0] * thx;
+
+  thy   = N/bwi[1] + ((N%bwi[1] != 0) ? 1 : 0);  
+  thy   = thy/ls[1] + ((thy%ls[1] != 0) ? 1 : 0); 
+  thy   = ls[1] * thy;
+
+  globalThreads[0] = thx;
+  globalThreads[1] = thy;
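+
+  // Worked example (illustration only): with the 16x16 small-matrix variant,
+  // ls = {8,8} and bwi = {2,2}. For M = N = 160, thx = 160/2 = 80, already a
+  // multiple of ls[0], so globalThreads = {80,80}: an 80x80 grid of
+  // work-items, each computing a 2x2 block of C. The ceil-style rounding
+  // above only matters when M or N is not a multiple of ls*bwi.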
+
+  err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL,
+    globalThreads, ls , 
+    args.numEventsInWaitList, 
+    args.eventWaitList, 
+    args.events);
+
+
+
+ 
+  clReleaseKernel(kernel) ;
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  return clblasStatus(err) ;
+
+}
+
diff --git a/src/library/blas/functor/gcn_sgemm.cc b/src/library/blas/functor/gcn_sgemm.cc
new file mode 100644
index 0000000..f065bba
--- /dev/null
+++ b/src/library/blas/functor/gcn_sgemm.cc
@@ -0,0 +1,556 @@
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+#include <gcn_sgemm.h>
+
+#include "BinaryBuild.h"
+
+// For the moment, only OpenCL source and pre-built CL binaries are handled (no SPIR).
+
+#if BUILD_KERNEL_FROM_STRING
+#include "sgemm_gcn.clT"
+#else 
+
+#include "sgemm_gcn.clHawaii_64.bin.clT"
+#include "sgemm_gcn.clBonaire_64.bin.clT"
+
+#include "sgemm_gcn.clTahiti_64.bin.clT"
+#endif
+
+
+//
+// The name of the 'const char *' providing the kernel OpenCL source
+//
+//  sgemm_TATB_DIVN_DIVM_DIVK_BS0xBS1_NV0xNV1MULT
+//
+// For instance, SGEMM_SRC_NAME(N,T,32,64,8,8,8,4,8,__ALPHABETA) is sgemm_NT_32_64_8_8x8_4x8__ALPHABETA
+//
+#define SGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT
+#define SGEMM_SRC_NAME_TAHITI(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Tahiti
+#define SGEMM_SRC_NAME_HAWAII(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Hawaii
+#define SGEMM_SRC_NAME_BONAIRE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Bonaire
+
+//
+// The name of the 'const char []' global variable that contains the SPIR data.
+// That name is similar to the one produced by SGEMM_SRC_NAME but suffixed with _spir
+//
+#define SGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)  sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1_spir
+
+//
+// The name of the 'const char []' global variable that contains the CL binary data.
+// That name is similar to the one produced by SGEMM_SRC_NAME but suffixed with _bin
+//
+
+
+// The name of the kernel itself.
+// This is basically the name returned by SGEMM_SRC_NAME but as a string
+//
+#define SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) "sgemm_"  #TA #TB "_" #DIVN "_" #DIVM "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT
+
+//
+// Helpers to transform N and T in proper clblas values for the macros above
+//
+#define trans_N clblasNoTrans
+#define trans_T clblasTrans
+
+
+// Fill a variant descriptor using OpenCL source 
+#define SGEMM_VARIANT_SRC(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) { \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) ,     \
+  SGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) ,        \
+  NULL, NULL, 0,                                                \
+  trans_##TA, trans_##TB,                                       \
+  DIVN,DIVM,DIVK,                                               \
+{ BS0, BS1 } ,                                                \
+{ NV0, NV1 }                                                  \
+} 
+
+// Fill a variant descriptor using SPIR  
+#define SGEMM_VARIANT_SPIR(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) {  \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1) ,       \
+  NULL , "-x spir -spir-std=1.2"                                  \
+  SGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1),          \
+  sizeof(SGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1)),  \
+  trans_##TA,trans_##TB,                                          \
+  DIVN,DIVM,DIVK,                                                 \
+{ BS0, BS1 } ,                                                  \
+{ NV0, NV1 }                                                    \
+} 
+
+// Fill a variant descriptor using CL Binaries  
+#define SGEMM_VARIANT_BIN_CL1(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,DEVICE,MULT) {  \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) ,           \
+  NULL , NULL,                                                        \
+  SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT),          \
+  sizeof(SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)),  \
+  trans_##TA,trans_##TB,                                              \
+  DIVN,DIVM,DIVK,                                                     \
+{ BS0, BS1 } ,                                                      \
+{ NV0, NV1 }                                                        \
+} 
+
+
+#define SGEMM_VARIANT_BIN_CL2(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,DEVICE,MULT) {  \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) ,           \
+  NULL , "-cl-std=CL2.0",                                                        \
+  SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT),          \
+  sizeof(SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)),  \
+  trans_##TA,trans_##TB,                                              \
+  DIVN,DIVM,DIVK,                                                     \
+{ BS0, BS1 } ,                                                      \
+{ NV0, NV1 }                                                        \
+} 
+
+// Set to 1 to enable additional debug prints.
+#define VERB 0
+
+// Just because the full name is too long
+typedef clblasSgemmFunctorGCN::Variant Variant ; 
+
+//
+// The static cache used to store all instances of clblasSgemmFunctorGCN 
+//
+typedef clblasFunctorCache<clblasSgemmFunctorGCN,const Variant *> Cache ;
+static Cache cache  ;
+
+
+// return true iff a kernel variant is applicable to the specified args
+static bool applicable( const Variant & var, clblasSgemmFunctor::Args & args ) 
+{
+#if 0
+  // Transpose values are tested in select_variant
+  if ( args.transA != var.transA ) return false ;
+  if ( args.transB != var.transB ) return false ;
+#endif
+  if ( args.N % var.divN != 0 ) return false ; 
+  if ( args.M % var.divM != 0 ) return false ; 
+  if ( args.K % var.divK != 0 ) return false ; 
+  if ( args.beta==0 && var.mult.compare("__ALPHA")!=0)
+    return false ;
+
+  return true ;
+}
+
+//
+// The goal of this function is to return the Variant to be used 
+// for the SGEMM specified by 'args'. 
+//
+// The variants are typically tested sequentially, from the most 
+// specific to the most generic. Additional conditions can be 
+// placed in the surrounding 'if' (typically to perform further 
+// tests on M, N and K).
+// 
+//
+
+static const Variant * select_variant( clblasSgemmFunctor::Args & args, const char* DevName, cl_uint _64BitsUse )
+{
+  //
+
+  if(_64BitsUse!=64)
+  {
+    std::cout<<"clBLAS is not supported on 32-bit platforms"<< std::endl;
+    return NULL;
+  }
+
+  if ( args.transA == clblasNoTrans ) 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+      if (true) 
+      {
+
+        //we only manage the binary version here
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL1(N,N,96,96,16,16,16,6,6,64,TAHITI, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL1(N,N,96,96,16,16,16,6,6,64,TAHITI, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        //For GCN2 devices we will use the splitsgemm functor
+      }
+
+      if (true) 
+      {
+
+        //we only manage the binary version here
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL1(N,N,64,64,16,16,16,4,4,64,TAHITI, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL1(N,N,64,64,16,16,16,4,4,64,TAHITI, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,N,64,64,16,16,16,4,4,64,HAWAII, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,N,64,64,16,16,16,4,4,64,HAWAII, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Bonaire"))
+        {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,N,64,64,16,16,16,4,4,64,BONAIRE, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,N,64,64,16,16,16,4,4,64,BONAIRE, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        }
+
+      } 
+    }
+    else  
+    {
+      // ===== sgemm NT ======
+
+      if (true) 
+      {
+
+        //we only manage the binary version here
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL1(N,T,96,96,16,16,16,6,6,64,TAHITI, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL1(N,T,96,96,16,16,16,6,6,64,TAHITI, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        //For GCN2 devices we will use the splitsgemm functor
+        //else if(!strcmp(DevName, "Hawaii"))
+        //{
+        //  static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,T,96,96,16,16,16,6,6,64,HAWAII, __ALPHABETA) ;
+        //  if ( applicable(variant,args) ) 
+        //    return &variant ; 
+        //  static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,T,96,96,16,16,16,6,6,64,HAWAII, __ALPHA) ;
+        //  if ( applicable(variantA,args) ) 
+        //    return &variantA ; 
+
+        //}
+        //else if(!strcmp(DevName, "Bonaire"))
+        //{
+        //  static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,T,96,96,16,16,16,6,6,64,BONAIRE, __ALPHABETA) ;
+        //  if ( applicable(variant,args) ) 
+        //    return &variant ; 
+        //  static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,T,96,96,16,16,16,6,6,64,BONAIRE, __ALPHA) ;
+        //  if ( applicable(variantA,args) ) 
+        //    return &variantA ; 
+
+        //}
+      }
+
+      if (true) 
+      {
+
+        //we only manage the binary version here
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL1(N,T,64,64,16,16,16,4,4,64,TAHITI, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL1(N,T,64,64,16,16,16,4,4,64,TAHITI, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,T,64,64,16,16,16,4,4,64,HAWAII, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,T,64,64,16,16,16,4,4,64,HAWAII, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+        else if(!strcmp(DevName, "Bonaire"))
+        {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,T,64,64,16,16,16,4,4,64,BONAIRE, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,T,64,64,16,16,16,4,4,64,BONAIRE, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        }
+      }
+    }
+  }
+  else 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+      // ===== sgemm TN ======
+      if (true) 
+      {
+
+        //we only manage the binary version here
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL1(T,N,96,96,16,16,16,6,6,64,TAHITI, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL1(T,N,96,96,16,16,16,6,6,64,TAHITI, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        //For GCN2 devices we will use the splitsgemm functor
+      }
+
+      if (true) 
+      {
+
+        //we only manage the binary version here
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL1(T,N,64,64,16,16,16,4,4,64,TAHITI, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL1(T,N,64,64,16,16,16,4,4,64,TAHITI, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL2(T,N,64,64,16,16,16,4,4,64,HAWAII, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL2(T,N,64,64,16,16,16,4,4,64,HAWAII, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Bonaire"))
+        {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+          static const Variant variant = SGEMM_VARIANT_BIN_CL2(T,N,64,64,16,16,16,4,4,64,BONAIRE, __ALPHABETA) ;
+          if ( applicable(variant,args) ) 
+            return &variant ; 
+          static const Variant variantA = SGEMM_VARIANT_BIN_CL2(T,N,64,64,16,16,16,4,4,64,BONAIRE, __ALPHA) ;
+          if ( applicable(variantA,args) ) 
+            return &variantA ; 
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        }
+
+      } 
+    }
+  }
+
+  
+
+  return NULL ; // No suitable variant ... will use the fallback
+
+}  
+
+clblasSgemmFunctorGCN::clblasSgemmFunctorGCN(Args & args, const Variant * variant, cl_int & err)  :
+  m_program(0) , m_variant(variant)
+{
+
+  cl_device_id device;
+  cl_context context;
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variant->kernel_name) ;
+
+  //Ben do I use the correct "kernel_name"?
+  BinaryLookup bl(context, device, "clblasSgemmFunctorGCN");
+  //clGetDeviceInfo(device, CL_DEVICE_NAME);
+
+  bl.variantRaw( this->m_variant->kernel_name, strlen(this->m_variant->kernel_name)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variant->bin != 0 ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      err = bl.buildFromBinary(this->m_variant->bin, this->m_variant->bin_size, this->m_variant->build_options);
+    }
+    else
+    {
+      // directly build from a char* 
+      err = bl.buildFromSource(this->m_variant->source);
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+clblasStatus clblasSgemmFunctorGCN::execute(Args &args) 
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s\n", this->m_variant->kernel_name) ;
+
+  cl_kernel kernel = clCreateKernel( this->m_program, this->m_variant->kernel_name,  &err);
+  if (err != CL_SUCCESS) return clblasStatus(err) ; 
+
+  if (VERB) printf(" ===> FOUND %s\n", this->m_variant->kernel_name) ;
+
+  int M   = args.M, N = args.N, K = args.K;
+  int lda = args.lda, ldb = args.ldb, ldc = args.ldc;
+
+  int offsetA = args.offA;
+  int offsetB = args.offB;
+  int offsetC = args.offC;
+
+  int arg=0 ; 
+
+  // All sgemm kernels shall have the same arguments: (A,B,C,M,N,K,alpha,beta,lda,ldb,ldc,offa,offb,offc) 
+
+  setKernelArg<cl_mem>(kernel, arg++, args.A);
+  setKernelArg<cl_mem>(kernel, arg++, args.B);
+  setKernelArg<cl_mem>(kernel, arg++, args.C);
+
+  setKernelArg<int>(kernel, arg++, M);
+  setKernelArg<int>(kernel, arg++, N);
+  setKernelArg<int>(kernel, arg++, K);
+
+  setKernelArg<cl_float>(kernel, arg++, args.alpha);
+  if (args.beta!=0 && this->m_variant->mult.compare("__ALPHA")!=0)
+    setKernelArg<cl_float>(kernel, arg++, args.beta);
+
+  setKernelArg<int>(kernel, arg++, lda);
+  setKernelArg<int>(kernel, arg++, ldb);
+  setKernelArg<int>(kernel, arg++, ldc);
+
+  setKernelArg<int>(kernel, arg++, offsetA);
+  setKernelArg<int>(kernel, arg++, offsetB);
+  setKernelArg<int>(kernel, arg++, offsetC);
+
+  const size_t * ls  = this->m_variant->ls  ; // Each work group is made of ls[0] x ls[1]  PE
+  const size_t * bwi = this->m_variant->bwi ; // Each PE updates bwi[0] x bwi[1] values
+
+  size_t globalThreads[2];
+
+  unsigned int thx, thy;
+
+  thx   = M/bwi[0] + ((M%bwi[0] != 0) ? 1 : 0);   
+  thx   = thx/ls[0] + ((thx%ls[0] != 0) ? 1 : 0); 
+  thx   = ls[0] * thx;
+
+  thy   = N/bwi[1] + ((N%bwi[1] != 0) ? 1 : 0);  
+  thy   = thy/ls[1] + ((thy%ls[1] != 0) ? 1 : 0); 
+  thy   = ls[1] * thy;
+
+  globalThreads[0] = thx;
+  globalThreads[1] = thy;
+
+  err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL,
+    globalThreads, ls , 
+    args.numEventsInWaitList, 
+    args.eventWaitList, 
+    args.events);
+
+  clReleaseKernel(kernel) ;
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  return clblasStatus(err) ;
+}
+
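
For reference, the global work size computed in execute() above is just M and N divided by the per-work-item tile (bwi) and rounded up to a multiple of the work-group size (ls). The sketch below restates that rounding with a hypothetical ceil_div helper and sample sizes; it is illustrative only and not part of the patch.

    // Illustration only: ceil_div and the sample sizes are hypothetical.
    #include <cstddef>

    static inline size_t ceil_div(size_t a, size_t b) { return a / b + (a % b != 0); }

    // Example: M = 1000, bwi[0] = 6, ls[0] = 16
    //   ceil_div(1000, 6) = 167   work-items needed along x
    //   ceil_div(167, 16) = 11    work-groups along x
    //   11 * 16           = 176   global size along x (a multiple of the local size)
    static const size_t example_global_x = ceil_div(ceil_div(1000, 6), 16) * 16;   // == 176
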
+
+clblasSgemmFunctorGCN * 
+  clblasSgemmFunctorGCN::provide(clblasSgemmFunctor::Args & args, const char* DevName) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+
+  cl_uint bitness = getAddressBits(dev);
+
+  const Variant * variant = select_variant( args, DevName, bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+  Cache::Lookup lookup(cache, ctxt, dev, variant) ;
+
+  if ( lookup.ok() )
+  {
+    clblasSgemmFunctorGCN * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+
+  clblasSgemmFunctorGCN * functor = new clblasSgemmFunctorGCN(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+
+}
+
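
For context, here is a minimal sketch of how a caller would use the functor defined above, assuming the usual retain()/release() reference counting of the functor framework; the helper name run_sgemm_gcn and the fallback status are illustrative, not part of the patch.

    // Minimal caller sketch (illustrative): provide(), then execute(), then drop the reference.
    static clblasStatus run_sgemm_gcn(clblasSgemmFunctor::Args & args, const char * devName)
    {
      clblasSgemmFunctorGCN * functor = clblasSgemmFunctorGCN::provide(args, devName);
      if (functor == NULL)
        return clblasNotImplemented;   // caller would fall back to the generic sgemm path
      clblasStatus status = functor->execute(args);
      functor->release();              // assumed counterpart of the retain() done by provide()
      return status;
    }
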
diff --git a/src/library/blas/functor/gcn_sgemmSmallMatrices.cc b/src/library/blas/functor/gcn_sgemmSmallMatrices.cc
new file mode 100644
index 0000000..c50a899
--- /dev/null
+++ b/src/library/blas/functor/gcn_sgemmSmallMatrices.cc
@@ -0,0 +1,558 @@
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+
+#include "BinaryBuild.h"
+#include "gcn_sgemmSmallMatrices.h"
+
+
+#if BUILD_KERNEL_FROM_STRING
+#include "sgemm_gcn_SmallMatrices.clT"
+#else 
+
+#include "sgemm_gcn_SmallMatrices.clHawaii_64.bin.clT"
+#include "sgemm_gcn_SmallMatrices.clBonaire_64.bin.clT"
+#include "sgemm_gcn_SmallMatrices.clTahiti_64.bin.clT"
+
+#endif
+
+
+#define SGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)    sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT
+#define SGEMM_SRC_NAME_TAHITI(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Tahiti
+#define SGEMM_SRC_NAME_HAWAII(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Hawaii
+#define SGEMM_SRC_NAME_BONAIRE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1##MULT##_##BITS##_bin_Bonaire
+
+
+//
+// The name of the 'const char []' global variable that contains the SPIR data.
+// That name is similar to the one produced by SGEMM_SRC_NAME but suffixed by _spir
+//
+#define SGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT)   sgemm_##TA##TB##_##DIVN##_##DIVM##_##DIVK##_##BS0##x##BS1##_##NV0##x##NV1_spir
+
+//
+// The name of the 'const char []' global variable that contains the CL binaries data.
+// That name is similar to the one produced by SGEMM_SRC_NAME but suffixed by _bin
+//
+
+
+// The name of the kernel itself.
+// This is basically the name returned by SGEMM_SRC_NAME but as a string
+//
+#define SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,MULT) "sgemm_"  #TA #TB "_" #DIVN "_" #DIVM "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT 
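
As a concrete example of the token pasting above (derived from the macro definitions, shown only for illustration):

    // SGEMM_SRC_NAME(N,T,32,32,16,16,16,2,2,__ALPHABETA)
    //   expands to the identifier  sgemm_NT_32_32_16_16x16_2x2__ALPHABETA
    // SGEMM_KERNEL_NAME(N,T,32,32,16,16,16,2,2,__ALPHABETA)
    //   expands to the string     "sgemm_NT_32_32_16_16x16_2x2__ALPHABETA"
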
+
+//
+// Helpers to transform N and T in proper clblas values for the macros above
+//
+#define trans_N clblasNoTrans
+#define trans_T clblasTrans
+
+
+// Fill a variant descriptor using OpenCL source 
+#define SGEMM_VARIANT_SRC(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) { \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,     \
+  SGEMM_SRC_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,        \
+  NULL, NULL, 0,                                                \
+  trans_##TA, trans_##TB,                                       \
+  DIVN,DIVM,DIVK,                                               \
+{ BS0, BS1 } ,                                                \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+// Fill a variant descriptor using SPIR  
+#define SGEMM_VARIANT_SPIR(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) {  \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,       \
+  NULL , "-x spir -spir-std=1.2",                                \
+  SGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1),          \
+  sizeof(SGEMM_SPIR_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1)),  \
+  trans_##TA,trans_##TB,                                          \
+  DIVN,DIVM,DIVK,                                                 \
+{ BS0, BS1 } ,                                                  \
+{ NV0, NV1 }  ,                                                      \
+#MULT                                                               \
+} 
+
+// Fill a variant descriptor using CL Binaries  
+#define SGEMM_VARIANT_BIN_CL1(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,DEVICE, MULT) {  \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,           \
+  NULL , NULL,                                                        \
+  SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT),          \
+  sizeof(SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT)),  \
+  trans_##TA,trans_##TB,                                              \
+  DIVN,DIVM,DIVK,                                                     \
+{ BS0, BS1 } ,                                                      \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+
+#define SGEMM_VARIANT_BIN_CL2(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS,DEVICE, MULT) {  \
+  SGEMM_KERNEL_NAME(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1, MULT) ,           \
+  NULL , "-cl-std=CL2.0",                                                        \
+  SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT),          \
+  sizeof(SGEMM_SRC_NAME##_##DEVICE(TA,TB,DIVN,DIVM,DIVK,BS0,BS1,NV0,NV1,BITS, MULT)),  \
+  trans_##TA,trans_##TB,                                              \
+  DIVN,DIVM,DIVK,                                                     \
+{ BS0, BS1 } ,                                                      \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+// Just because the full name is too long
+typedef clBlasGCNSgemmSmallMatricesFunctor::Variant Variant ; 
+
+////define the string name of the source/binary code
+//#define DGEMM_SRC_NAME(TA,TB,MULT)    dgemm_##TA##TB##_SMALL##MULT
+//#define DGEMM_SRC_NAME_HAWAII(TA,TB,  MULT, BITS)   dgemm_##TA##TB##_SMALL##MULT##_##BITS##_bin_Hawaii
+//
+////variant name used to differentiate the different ones
+//#define DGEMM_VARIANT_NAME(TA,TB, MULT) "dgemm_" #TA #TB "_SMALL" #MULT
+////DGEMM_VARIANT_NAME(TA, TB, DIVM , DIVN, DIVK, GREATER48M, GREATER48N, NBKERNEL),    
+//
+//#define DGEMM_KERNEL_NAME(TA,TB,DIVM,DIVN,DIVK,BS0,BS1,NV0,NV1,MULT, BLOC) "dgemm_"  #TA #TB "_" #DIVM "_" #DIVN "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT "_SPLIT_" #BLOC
+//
+//
+//#define trans_N clblasNoTrans
+//#define trans_T clblasTrans
+//
+//// Fill a variant descriptor using OpenCL source 
+//#define DGEMM_VARIANT_OBJ(TA,TB,DIVK,BS0,BS1,NV0,NV1, BITS, MULT,  \
+//  KERNEL_NAME_MAIN, KERNEL_NAME_ROW, \
+//  KERNELS_SRC,  \
+//  KERNEL_BUILD_OPTIONS,  \
+//  KERNELS_BIN,  \
+//  KERNEL_BIN_SIZE) { \
+//  DGEMM_VARIANT_NAME(TA,TB, MULT),                                          \
+//{ KERNEL_NAME_MAIN, KERNEL_NAME_ROW } , \
+//  KERNELS_SRC,  \
+//  KERNEL_BUILD_OPTIONS, \
+//  KERNELS_BIN, \
+//  KERNEL_BIN_SIZE, \
+//  trans_##TA, trans_##TB,                                       \
+//  DIVK ,                                                        \
+//{ BS0, BS1 } ,                                                \
+//{ NV0, NV1 } ,                                                      \
+//#MULT                                                               \
+//} 
+
+typedef clblasFunctorCache<clBlasGCNSgemmSmallMatricesFunctor,const Variant *> CacheSMallsgemm ;
+static CacheSMallsgemm cachesmall  ;
+
+// Make it 1 to enable additional debug 'print' 
+#define VERB 0
+
+
+
+static bool applicable( const Variant & var, clblasSgemmFunctor::Args & args ) 
+{
+#if 0
+  // Transpose values are tested in select_variant
+  if ( args.transA != var.transA ) return false ;
+  if ( args.transB != var.transB ) return false ;
+#endif
+
+  //if (args.N>=var.divN && args.N % var.divN != 0 )
+  if ( args.N % var.divN != 0 ) 
+    return false ; 
+  if ( args.M % var.divM != 0 ) 
+    return false ; 
+  if ( args.beta==0 && var.mult.compare("__ALPHA")!=0)
+    return false ;
+  return true ;
+}
+
+
+
+
+static const Variant * select_variant_GCNSmallMatrices( clblasSgemmFunctor::Args & args, const char* DevName, cl_uint _64BitsUse )
+{
+
+  if(_64BitsUse!=64)
+  {
+    std::cout<<"clBLAS is not supported on 32-bit platforms"<< std::endl;
+    assert(false); // was assert(1), which never fires; 32-bit builds are unsupported
+    return NULL;
+  }
+
+  if ( args.transA == clblasNoTrans ) 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = SGEMM_VARIANT_SRC(N,N,32,32,16,16,16,2,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = SGEMM_VARIANT_SRC(N,N,32,32,16,16,16,2,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL1(N,N,32,32,16,16,16,2,2,64,TAHITI,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL1(N,N,32,32,16,16,16,2,2,64,TAHITI,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,N,32,32,16,16,16,2,2,64,HAWAII,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,N,32,32,16,16,16,2,2,64,HAWAII,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+        else if(!strcmp(DevName, "Bonaire"))
+        {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,N,32,32,16,16,16,2,2,64,BONAIRE,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,N,32,32,16,16,16,2,2,64,BONAIRE,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        }
+
+#endif
+      } 
+
+      
+
+    }
+    if (args.transB == clblasTrans)
+    {
+            if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = SGEMM_VARIANT_SRC(N,T,32,32,16,16,16,2,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = SGEMM_VARIANT_SRC(N,T,32,32,16,16,16,2,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL1(N,T,32,32,16,16,16,2,2,64,TAHITI,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL1(N,T,32,32,16,16,16,2,2,64,TAHITI,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,T,32,32,16,16,16,2,2,64,HAWAII,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ;           
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,T,32,32,16,16,16,2,2,64,HAWAII,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Bonaire"))
+        {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL2(N,T,32,32,16,16,16,2,2,64,BONAIRE,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ;           
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL2(N,T,32,32,16,16,16,2,2,64,BONAIRE,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        }
+#endif
+      } 
+    }
+  }
+  else
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+      if (true) 
+      {
+#if BUILD_KERNEL_FROM_STRING
+        static const Variant variant = SGEMM_VARIANT_SRC(T,N,32,32,16,16,16,2,2,__ALPHABETA) ;
+        if ( applicable(variant,args) ) 
+          return &variant ; 
+
+        static const Variant variantA = SGEMM_VARIANT_SRC(T,N,32,32,16,16,16,2,2,__ALPHA) ;
+        if ( applicable(variantA,args) ) 
+          return &variantA ; 
+
+#else 
+        if(!strcmp(DevName, "Tahiti"))
+        {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL1(T,N,32,32,16,16,16,2,2,64,TAHITI,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+           
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL1(T,N,32,32,16,16,16,2,2,64,TAHITI,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ;
+#endif //#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+        }
+        else if(!strcmp(DevName, "Hawaii"))
+        {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL2(T,N,32,32,16,16,16,2,2,64,HAWAII,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL2(T,N,32,32,16,16,16,2,2,64,HAWAII,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ; 
+#endif //#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+        }
+
+        else if(!strcmp(DevName, "Bonaire"))
+        {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+            static const Variant variant = SGEMM_VARIANT_BIN_CL2(T,N,32,32,16,16,16,2,2,64,BONAIRE,__ALPHABETA ) ;
+            if ( applicable(variant,args) ) 
+              return &variant ; 
+
+            static const Variant variantA = SGEMM_VARIANT_BIN_CL2(T,N,32,32,16,16,16,2,2,64,BONAIRE,__ALPHA ) ;
+            if ( applicable(variantA,args) ) 
+              return &variantA ;
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        }
+
+#endif
+      } 
+    }
+  }
+
+
+  return NULL;
+}
+
+clBlasGCNSgemmSmallMatricesFunctor::clBlasGCNSgemmSmallMatricesFunctor(Args & args, const Variant * variant, cl_int & err) 
+{
+
+  cl_device_id device;
+  cl_context context;
+  m_program=NULL;
+  m_variant = variant;
+
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variant->kernel_name) ;
+
+  //Ben do I use the correct "kernel_name"?
+  BinaryLookup bl(context, device, "clBlasGCNSgemmSmallMatricesFunctor");
+
+  bl.variantRaw( this->m_variant->kernel_name, strlen(this->m_variant->kernel_name)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variant->bin != NULL ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      // only one binary containing all the kernels
+      err = bl.buildFromBinary(this->m_variant->bin, this->m_variant->bin_size, this->m_variant->build_options);
+    }
+    else
+    {
+      // directly build from a char*
+      // (no early return here: m_program still needs to be set from bl.getProgram() below)
+      err = bl.buildFromSource(this->m_variant->source);
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+
+
+clBlasGCNSgemmSmallMatricesFunctor * 
+  clBlasGCNSgemmSmallMatricesFunctor::provide(clblasSgemmFunctor::Args & args, const char* DevName) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+
+  const Variant * variant = select_variant_GCNSmallMatrices( args, DevName,  bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+  CacheSMallsgemm::Lookup lookup(cachesmall, ctxt, dev, variant) ;
+
+
+  if ( lookup.ok() )
+  {
+    clBlasGCNSgemmSmallMatricesFunctor * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+
+  clBlasGCNSgemmSmallMatricesFunctor * functor = new clBlasGCNSgemmSmallMatricesFunctor(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+
+}
+
+
+
+
+//need to rewrite execute!!!
+clblasStatus clBlasGCNSgemmSmallMatricesFunctor::execute(Args &args)
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s, alpha =%f ,beta = %f\n", this->m_variant->kernel_name, args.alpha, args.beta) ;
+
+  cl_kernel kernel; 
+ // int NBKernel = 0;
+
+  kernel = clCreateKernel( this->m_program, this->m_variant->kernel_name,  &err);
+  if (err != CL_SUCCESS) return clblasStatus(err) ; 
+
+  //if (NBKernel != 4) return clblasStatus(clblasBuildProgramFailure) ; 
+
+  if (VERB)
+  {
+    printf(" ===> FOUND %s\n", this->m_variant->kernel_name) ;
+  }
+
+  int M   = args.M, N = args.N, K = args.K;
+  int lda = args.lda, ldb = args.ldb, ldc = args.ldc;
+
+  int offsetA = args.offA;
+  int offsetB = args.offB;
+  int offsetC = args.offC;
+
+  int arg=0 ; 
+
+  // All sgemm kernels shall have the same arguments: (A,B,C,M,N,K,alpha,beta,lda,ldb,ldc,offa,offb,offc) 
+
+  setKernelArg<cl_mem>(kernel, arg++, args.A);
+  setKernelArg<cl_mem>(kernel, arg++, args.B);
+  setKernelArg<cl_mem>(kernel, arg++, args.C);
+
+  setKernelArg<int>(kernel, arg++, M);
+  setKernelArg<int>(kernel, arg++, N);
+  setKernelArg<int>(kernel, arg++, K);
+
+  setKernelArg<cl_float>(kernel, arg++, args.alpha);
+  if (args.beta!=0 && this->m_variant->mult.compare("__ALPHA")!=0)
+    setKernelArg<cl_float>(kernel, arg++, args.beta);
+
+  setKernelArg<int>(kernel, arg++, lda);
+  setKernelArg<int>(kernel, arg++, ldb);
+  setKernelArg<int>(kernel, arg++, ldc);
+
+  setKernelArg<int>(kernel, arg++, offsetA);
+  setKernelArg<int>(kernel, arg++, offsetB);
+  setKernelArg<int>(kernel, arg++, offsetC);
+
+
+ // err = KernelsLaunch(queue, kernel, args);
+  const size_t * ls  = this->m_variant->ls  ; // Each work group is made of ls[0] x ls[1]  PE
+  const size_t * bwi = this->m_variant->bwi ; // Each PE updates bwi[0] x bwi[1] values
+
+  size_t globalThreads[2];
+
+  unsigned int thx, thy;
+
+  thx   = M/bwi[0] + ((M%bwi[0] != 0) ? 1 : 0);   
+  thx   = thx/ls[0] + ((thx%ls[0] != 0) ? 1 : 0); 
+  thx   = ls[0] * thx;
+
+  thy   = N/bwi[1] + ((N%bwi[1] != 0) ? 1 : 0);  
+  thy   = thy/ls[1] + ((thy%ls[1] != 0) ? 1 : 0); 
+  thy   = ls[1] * thy;
+
+  globalThreads[0] = thx;
+  globalThreads[1] = thy;
+
+  err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL,
+    globalThreads, ls , 
+    args.numEventsInWaitList, 
+    args.eventWaitList, 
+    args.events);
+
+
+
+ 
+  clReleaseKernel(kernel) ;
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  return clblasStatus(err) ;
+
+}
+
diff --git a/src/library/blas/functor/gpu_dtrsm.cc b/src/library/blas/functor/gpu_dtrsm.cc
new file mode 100644
index 0000000..20ffdcd
--- /dev/null
+++ b/src/library/blas/functor/gpu_dtrsm.cc
@@ -0,0 +1,823 @@
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include "functor.h"
+#include "binary_lookup.h"
+#include <iostream>
+
+#include "functor_xtrsm.h"
+#include "gpu_dtrsm.h"
+#include "tahiti.h"
+#include "BinaryBuild.h"
+
+#if BUILD_KERNEL_FROM_STRING
+#include "dtrsm_gpu.clT"
+#else
+
+#include "dtrsm_gpu.clHawaii_64.bin.clT"
+
+
+#include "dtrsm_gpu.clTahiti_64.bin.clT"
+#endif
+
+
+// Make it 1 to enable additional debug 'print' 
+#define VERB 0
+
+
+//TODO
+//clReleaseKernel(kernel) ;
+
+#define BLOCK_SIZE 16 // inner blocking size, <=32
+#define NB 128        // outer blocking size, >BLOCK_SIZE
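
To give a feel for the scratch memory implied by these blocking sizes, here is a small worked example of the inverted-diagonal buffer allocated later in cl_dtrsm() (size_InvA = ldInvA * BLOCKS(M,nb) * nb * sizeof(double)); the matrix size is hypothetical and only meant to illustrate the formula.

    // Illustration only: left-side DTRSM with a hypothetical M = 4096 and nb = 128.
    //   BLOCKS(4096, 128) = 32 diagonal blocks to invert
    //   size_InvA = 128 * 32 * 128 * sizeof(double) = 4194304 bytes (4 MiB)
    static const size_t example_size_InvA = 128u * 32u * 128u * sizeof(double);
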
+
+
+//
+// The static cache used to store all instances of clblasDtrsmFunctorGpu /clblasDgemmFunctorTahiti 
+//
+typedef clblasFunctorCache<clblasDtrsmFunctorGpu, bool> Cache ;
+static Cache cache  ;
+
+
+clblasDtrsmFunctorGpu::clblasDtrsmFunctorGpu(Args & args,  cl_int & err, const char* DevName, cl_uint _64BitsUse)  :
+  m_program(0)
+{
+  
+  cl_device_id device;
+  cl_context context;
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+
+
+
+
+  if( err != CL_SUCCESS )
+  {
+      return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", "clblasDtrsmFunctorGpu") ;
+
+  BinaryLookup bl(context, device, "clblasDtrsmFunctorGpu");
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    // directly build from a char* 
+#if BUILD_KERNEL_FROM_STRING
+    err = bl.buildFromSource(dtrsm_gpu_kernels);
+#else 
+    if(!strcmp(DevName, "Tahiti"))
+    {
+#ifndef CLBLAS_TAHITI_DYNAMIC_KERNEL
+      if(_64BitsUse==64)
+        err = bl.buildFromBinary(dtrsm_gpu_kernels_64_bin_Tahiti, sizeof(dtrsm_gpu_kernels_64_bin_Tahiti), NULL);
+      else
+      {
+        std::cout<<"clBLAS is not supported on 32-bit platforms"<< std::endl;
+        assert(false); // was assert(1), which never fires; 32-bit builds are unsupported
+      }
+#endif
+    }
+
+    else if(!strcmp(DevName, "Hawaii"))
+    {
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+      if(_64BitsUse==64)
+        err = bl.buildFromBinary(dtrsm_gpu_kernels_64_bin_Hawaii, sizeof(dtrsm_gpu_kernels_64_bin_Hawaii), NULL);
+      else
+      {
+        std::cout<<"clBLAS is not supported on 32-bit platforms"<< std::endl;
+        assert(false); // was assert(1), which never fires; 32-bit builds are unsupported
+      }
+#endif
+    }
+
+#endif
+ 
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+
+
+
+#define CALL_KERNEL_TRIPLE_UPDATE(kernel_name, prg, queue, A, offA, d_dinvA, i, lda, M, event)   \
+do{                                                                                              \
+  err = call_kernel_triple_update(kernel_name, prg, queue, A, offA, d_dinvA, i, lda, M, event);  \
+  if(err != CL_SUCCESS) {                                                                        \
+    return err;                                                                                  \
+  }                                                                                              \
+} while(0)
+
+
+
+cl_int call_kernel_triple_update(const char* kernel_name, 
+                                 const cl_program prg,
+                                 const cl_command_queue queue,
+                                 cl_mem A,
+                                 unsigned int offA,
+                                 cl_mem d_dinvA,
+                                 int i,
+                                 unsigned int lda,
+                                 int M, 
+                                 cl_event *event)
+{
+  cl_int err = 0;
+
+  unsigned int m = M; 
+
+  int npages = M/(i*2)+(M%(i*2)!=0);
+  size_t globalLocal  [2] = { (i <= 32)?(i/4):16, 4};
+  size_t globalThreads[2] = { (i/(globalLocal[0]*globalLocal[1]))* globalLocal[0],
+				npages*(i/16) * globalLocal[1]};
+
+  cl_kernel kernel = clCreateKernel(prg, kernel_name, &err);
+  if (err != CL_SUCCESS) {
+    //printf( "create kernel %s failed with %d\n", kernel_name, err );
+    return err;
+  }
+
+  clSetKernelArg(kernel, 0, sizeof(cl_mem), &A);
+  clSetKernelArg(kernel, 1, sizeof(unsigned int), &offA);
+  clSetKernelArg(kernel, 2, sizeof(cl_mem), &d_dinvA);
+  clSetKernelArg(kernel, 3, sizeof(int), &i);
+  clSetKernelArg(kernel, 4, sizeof(unsigned int), &lda);
+  clSetKernelArg(kernel, 5, sizeof(int), &npages);
+  clSetKernelArg(kernel, 6, sizeof(unsigned int), &m);
+
+  err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL,
+			       globalThreads, globalLocal , 
+			       0, NULL, event);
+  
+ 
+  if (err != CL_SUCCESS) {
+    clReleaseKernel(kernel);
+    //printf( "execution of kernel %s failed with %d\n", kernel_name, err );
+    return err;
+  }
+
+  err = clReleaseKernel(kernel);
+  return err;
+
+}
+
+
+
+
+//extern "C"
+cl_int diag_dtrtri (cl_program prg,
+                    cl_command_queue queue,		  
+                    int M, 
+                    clblasUplo uplo, 
+                    clblasDiag diag, 
+                    cl_mem A, 
+                    size_t offA,
+                    cl_mem d_dinvA, 
+                    size_t lda,
+                    cl_event *event )
+{
+
+  cl_int err = 0;
+
+  /*
+    This routine is used in dtrsm
+  */
+
+
+  int nthreads = (M/BLOCK_SIZE + (M % BLOCK_SIZE != 0)) * BLOCK_SIZE;
+  unsigned int m = M;
+ 
+  if (uplo == clblasLower) {
+
+
+    cl_kernel diag_dtrtri_kernel_lower = clCreateKernel(prg, "DIAG_DTRTRI_KERNEL_LOWER", &err);
+    if (err != CL_SUCCESS) {
+      //printf( "create kernel -diag_dtrtri_kernel_lower- failed with %d\n", err );
+      return err;
+    }
+
+    int isDiagUnit = (diag == clblasUnit);
+    clSetKernelArg(diag_dtrtri_kernel_lower, 0, sizeof(int), &isDiagUnit);
+    clSetKernelArg(diag_dtrtri_kernel_lower, 1, sizeof(cl_mem), &A);
+    clSetKernelArg(diag_dtrtri_kernel_lower, 2, sizeof(unsigned int), &offA);
+    clSetKernelArg(diag_dtrtri_kernel_lower, 3, sizeof(cl_mem), &d_dinvA);
+    clSetKernelArg(diag_dtrtri_kernel_lower, 4, sizeof(unsigned int), &lda);
+    clSetKernelArg(diag_dtrtri_kernel_lower, 5, sizeof(unsigned int), &m);
+
+
+    size_t globalThreads[1] = { nthreads };
+    size_t globalLocal  [1] = { BLOCK_SIZE };
+ 
+    err = clEnqueueNDRangeKernel(queue, diag_dtrtri_kernel_lower, 1, NULL,
+				 globalThreads, globalLocal , 
+				 0, NULL, event);
+
+    if (err != CL_SUCCESS) {
+      //printf( "kernel -diag_dtrtri_kernel_lower- failed with %d\n", err );
+      return err;
+    }
+
+    err = clReleaseKernel(diag_dtrtri_kernel_lower);
+    if (err != CL_SUCCESS) {
+      return err;
+    }
+
+
+    // update the inverse up to the size of BLOCK_SIZE
+    for( int i=BLOCK_SIZE; i < NB; i*=2 ) {
+
+      switch (i) {
+      case 16:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_16_PART1_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_16_PART2_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	break;
+
+      case 32:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_32_PART1_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_32_PART2_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	break;
+
+      case 64:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_64_PART1_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_64_PART2_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	break;
+
+      default:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_ABOVE64_PART1_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_ABOVE64_PART2_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_ABOVE64_PART3_L", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	break;
+	
+      }
+      if (i*2 >= M) break;
+    }
+    
+  }
+  else {
+
+    cl_kernel diag_dtrtri_kernel_upper = clCreateKernel(prg, "DIAG_DTRTRI_KERNEL_UPPER", &err);
+    if (err != CL_SUCCESS) {
+      //printf( "create kernel -diag_dtrtri_kernel_upper- failed with %d\n", err );
+      return err;
+    }
+
+    int isDiagUnit = (diag == clblasUnit);
+    clSetKernelArg(diag_dtrtri_kernel_upper, 0, sizeof(int), &isDiagUnit);
+    clSetKernelArg(diag_dtrtri_kernel_upper, 1, sizeof(cl_mem), &A);
+    clSetKernelArg(diag_dtrtri_kernel_upper, 2, sizeof(unsigned int), &offA);
+    clSetKernelArg(diag_dtrtri_kernel_upper, 3, sizeof(cl_mem), &d_dinvA);
+    clSetKernelArg(diag_dtrtri_kernel_upper, 4, sizeof(unsigned int), &lda);
+    clSetKernelArg(diag_dtrtri_kernel_upper, 5, sizeof(unsigned int), &m);
+
+    size_t globalThreads[1] = { nthreads };
+    size_t globalLocal  [1] = { BLOCK_SIZE };
+ 
+    err = clEnqueueNDRangeKernel(queue, diag_dtrtri_kernel_upper, 1, NULL,
+				 globalThreads, globalLocal , 
+				 0, NULL, event);
+
+    if (err != CL_SUCCESS) {
+      //printf( "kernel -diag_dtrtri_kernel_upper- failed with %d\n", err );
+      return err;
+    }
+ 
+    clReleaseKernel(diag_dtrtri_kernel_upper);
+    if (err != CL_SUCCESS) {
+      return err;
+    }
+
+    // update the inverse up to the size of BLOCK_SIZE
+    for( int i=BLOCK_SIZE; i < NB; i*=2 ) {
+
+      switch (i) {
+      case 16:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_16_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	break;
+
+      case 32:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_32_PART1_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_32_PART2_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	break;
+
+      case 64:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_64_PART1_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_64_PART2_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	break;
+
+      default:
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_ABOVE64_PART1_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_ABOVE64_PART2_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+	CALL_KERNEL_TRIPLE_UPDATE("TRIPLE_DGEMM_UPDATE_ABOVE64_PART3_R", prg, queue, A, offA, d_dinvA, i, lda, M, event);
+
+	break;
+      }
+      
+      if (i*2 >= M) break;
+    }
+    
+  }
+
+  return err;
+
+}
+
+
+
+
+#define check_error(cmd)                   \
+do{                                        \
+  cl_int xxxerr = cmd ;                    \
+  if (xxxerr != CL_SUCCESS) {              \
+    if(InvA != 0)                          \
+      clReleaseMemObject(InvA);            \
+    if(X != 0)                             \
+      clReleaseMemObject(X);               \
+    return xxxerr;                         \
+  }                                        \
+} while(0)
+
+
+static cl_int clearBuffer( cl_command_queue  queue ,
+                           cl_mem  buffer ,
+                           size_t  buffer_size )
+{
+  
+  cl_int err = 0;
+  cl_event  event; 
+  // Note: clEnqueueFillBuffer requires OpenCL 1.2.
+  double zero = 0.0 ; 
+  err = clEnqueueFillBuffer(queue, 
+                            buffer, 
+                            &zero, 
+                            sizeof(double), 
+                            0,  // offset 
+                            buffer_size, 
+                            0,
+                            NULL, 
+                            &event 
+                            ) ;
+
+  return err;
+
+}
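
Since clEnqueueFillBuffer is only available from OpenCL 1.2 (as noted above), one possible pre-1.2 fallback is sketched below: it writes a zeroed host buffer instead. This is illustrative only, not part of the patch, and assumes the buffer size is a multiple of sizeof(double).

    // Hypothetical OpenCL 1.1 fallback for clearBuffer(): fill from a zeroed host vector.
    #include <vector>
    static cl_int clearBufferCL11( cl_command_queue queue, cl_mem buffer, size_t buffer_size )
    {
      std::vector<double> zeros(buffer_size / sizeof(double), 0.0);
      return clEnqueueWriteBuffer(queue, buffer, CL_TRUE /* blocking */,
                                  0 /* offset */, buffer_size, zeros.data(),
                                  0, NULL, NULL);
    }
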
+
+
+
+#define nb 128        // outer blocking size, >BLOCK_SIZE
+#define min(x,y) ((x)<(y)?(x):(y))
+
+
+
+cl_int cl_dtrsm( cl_program prg,
+                 cl_command_queue  queue , 
+                 clblasSide side, 
+                 clblasUplo uplo, 
+                 clblasTranspose transA, 
+                 clblasDiag diag, 
+                 int M, 
+                 int N,
+                 double alpha,
+                 cl_mem A, size_t offA, size_t ldA,
+                 cl_mem B, size_t offB, size_t ldB,
+                 cl_event *event
+                 ) 
+{
+  cl_int err = 0;
+
+  int i;
+  cl_context context;
+  err = getQueueContext(queue, &context);
+  if(err != CL_SUCCESS) return err;
+
+  /* quick return on wrong size */
+  if (M <= 0 || N <= 0)
+    return clblasInvalidDim;
+    
+  double neg_one = -1.0 ;
+  double one     =  1.0 ;
+  double zero    =  0.0 ; 
+
+
+
+  // Compute the number of blocks of the specified 'size' to fully cover 'n' 
+  // Simply speaking, this is n/size rounded up.
+#define BLOCKS(n,size) ( ( (n) / size ) + ( (n) % (size) != 0 ) )
+
+#define CLEANUP   
+
+#define END_DGEMM_ARGS 1,&queue,0,NULL,event
+
+  // Helper to pass the 3 arguments describing a (sub)-matrix to clblasDgemm 
+#define _(M,i,j)       M    , (off##M + ((i)+(j)*ld##M) ) , ld##M 
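
The _(M,i,j) helper above packs a buffer handle, the element offset of block (i,j) and the leading dimension into the three arguments clblasDgemm expects for one operand; the expansion below is derived from the macro definition and is shown only for illustration.

    //   _(B,i,0)    expands to   B , (offB + ((i)+(0)*ldB) ) , ldB
    //   _(InvA,0,i) expands to   InvA , (offInvA + ((0)+(i)*ldInvA) ) , ldInvA
    // i.e. exactly the (cl_mem, offset, ld) triple that the DGEMM_LEFT / DGEMM_RIGHT
    // helpers defined further down forward to clblasDgemm.
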
+
+  cl_mem InvA = 0;
+  cl_mem X    = 0;
+  // X of size mxn  will contain the result 
+  size_t ldX = M ; 
+  size_t offX = 0; //must be 0: needed by the _(X,i,j) macro
+  size_t size_X = N*ldX * sizeof(double);
+  X = clCreateBuffer(context, CL_MEM_READ_WRITE, size_X, NULL, &err);
+  check_error(err) ;         
+  err = clearBuffer( queue, X, size_X ) ;
+  check_error(err) ; 
+
+
+  if (side == clblasLeft) 
+    {
+      // side=L
+      /* invert the diagonals
+       * Allocate device memory for the inverted diagonal blocks, size=m*nb
+       */
+      size_t ldInvA = nb ; 
+      size_t offInvA = 0; //must be 0: needed by the _(X,i,j) macro
+      size_t size_InvA = ldInvA * BLOCKS(M,nb) * nb *sizeof(double); 
+      InvA = clCreateBuffer(context, CL_MEM_READ_WRITE, size_InvA, NULL, &err);
+
+      check_error(err) ;         
+      err = clearBuffer( queue, InvA, size_InvA ) ;
+      check_error(err) ; 
+
+      diag_dtrtri (prg, queue, M, uplo, diag, A, offA, InvA, ldA, event);
+
+      //
+      // Helper for C = alpha * transp(A) * B + beta * C         
+      //
+      // In the calls below:
+      //   - the 1st matrix shall be either A or InvA transposed according to transA.
+      //   - the 2nd and 3rd matrices are B and X respectively.
+      // 
+#define DGEMM_LEFT(m, n, k, alpha, A,  B, beta,  C) \
+    do { \
+        err = clblasDgemm(clblasColumnMajor, transA, clblasNoTrans , m, n, k, alpha, A, B, beta, C , 1, &queue, 0, NULL, event ) ; \
+        check_error(err) ; \
+    } while(0) 
+
+
+      if (transA == clblasNoTrans) 
+        {
+          /* the non-transpose case */
+          if (uplo == clblasLower) 
+            {
+              
+              /* the lower case */
+              /* handle the first block separately with alpha */
+              int mm = min(nb, (int) M);
+              DGEMM_LEFT( mm, N, mm, alpha, _(InvA,0,0) , _(B,0,0),  zero, _(X,0,0) );
+
+              if (nb < M) 
+                {
+                  DGEMM_LEFT( M-nb, N, nb, neg_one, _(A,nb,0), _(X,0,0), alpha, _(B,nb,0)  );
+                  
+                  /* the rest blocks */
+                  for( i=nb; i < M; i += nb ) {
+                    mm = min((int)M-i, nb);
+                    DGEMM_LEFT( mm, N, mm, one, _(InvA,0,i), _(B,i,0), zero, _(X,i,0) );
+                    
+                    if (i+nb >= M)
+                      break;
+                    
+                    DGEMM_LEFT( M-i-nb, N, nb, neg_one, _(A,i+nb,i), _(X,i,0), one, _(B,i+nb,0) );
+                  }
+                  
+                  //check_last_error() ; 
+                }
+
+
+            }
+          else // if ( uplo == clblasUpper) 
+            {
+             /* the upper case */
+              /* handle the first block separately with alpha */
+              int mm = (M % nb == 0) ? nb : (M % nb);
+              i = M-mm;
+              DGEMM_LEFT( mm, N, mm, alpha, _(InvA,0,i), _(B,i,0), zero, _(X,i,0) );
+
+              if (i-nb >= 0)
+                {                 
+                  DGEMM_LEFT( i, N, mm, neg_one, _(A,0,i), _(X,i,0), alpha, _(B,0,0) );
+
+                  /* the rest blocks */
+                  for( i=M-mm-nb; i >= 0; i -= nb ) {
+                    DGEMM_LEFT( nb, N, nb, one, _(InvA,0,i), _(B,i,0), zero, _(X,i,0) );
+
+                    if (i-nb < 0)
+                      break;
+
+                    DGEMM_LEFT( i, N, nb, neg_one, _(A,0,i), _(X,i,0), one, _(B,0,0) );
+                  }
+                }
+            }
+        }
+      else 
+        {
+          /* the transpose case */
+          if (uplo == clblasLower) 
+            {
+              /* the lower case */
+              /* handle the first block separately with alpha */
+
+              int mm = (M % nb == 0) ? nb : (M % nb);
+              i = M-mm;
+              DGEMM_LEFT(  mm, N, mm, alpha, _(InvA,0,i), _(B,i,0), zero, _(X,i,0) );
+
+              if (i-nb >= 0)
+                {
+                  DGEMM_LEFT(  i, N, mm, neg_one, _(A,i,0), _(X,i,0), alpha, _(B,0,0)  );
+                  
+                  /* the rest blocks */
+                  for( i=M-mm-nb; i >= 0; i -= nb ) {
+                    DGEMM_LEFT(  nb, N, nb, one, _(InvA,0,i), _(B,i,0), zero, _(X,i,0)  );
+                    
+                    if (i-nb < 0)
+                      break;
+                    
+                    DGEMM_LEFT( i, N, nb, neg_one, _(A,i,0), _(X,i,0), one, _(B,0,0)  );
+                  }
+                }
+            }
+          else 
+            {
+              /* the upper case */
+              /* handle the first block separately with alpha */
+              int mm = min(nb, (int)M);
+              DGEMM_LEFT( mm, N, mm, alpha, _(InvA,0,0), _(B,0,0), zero, _(X,0,0) );
+
+              if (nb < M) 
+                {
+                 
+                  DGEMM_LEFT( M-nb, N, nb, neg_one, _(A,0,nb) , _(X,0,0), alpha, _(B,nb,0)  );
+
+                  /* the rest blocks */
+                  for( i=nb; i < M; i += nb ) {
+                    mm = min((int)M-i, nb);
+                    DGEMM_LEFT( mm, N, mm, one, _(InvA,0,i), _(B,i,0), zero, _(X,i,0) );
+
+                    if (i+nb >= M)
+                      break;
+
+                    DGEMM_LEFT( M-i-nb, N, nb, neg_one, _(A,i,i+nb), _(X,i,0), one, _(B,i+nb,0)  );
+                  }
+                }
+            }
+        }
+    }
+  else 
+    {
+
+      //
+      // Helper for C = alpha * B * A + beta * C        
+      //
+      // In the calls below
+      //  - the 2nd matrix shall be either A or InvA transposed according to transA
+      //  - the 1st and 3rd matrices are B and X respectively.
+      //
+#define DGEMM_RIGHT(m,n,k, alpha,  B, A, beta, C )   \
+    do { \
+      err = clblasDgemm(clblasColumnMajor, clblasNoTrans, transA , m, n, k, alpha, B, A, beta, C , 1, &queue, 0, NULL, event ) ; \
+      check_error(err) ; \
+    } while(0) 
+
+
+      // side=R
+      /* invert the diagonals
+       * Allocate device memory for the inverted diagonal blocks, size = nb * BLOCKS(N,nb) * nb
+       */
+      size_t ldInvA = nb ; 
+      size_t offInvA = 0; //must be 0: needed by the _(X,i,j) macro
+      size_t size_InvA = ldInvA * BLOCKS(N,nb) * nb *sizeof(double); 
+      InvA = clCreateBuffer(context, CL_MEM_READ_WRITE, size_InvA, NULL, &err);
+      check_error(err) ;         
+      err = clearBuffer( queue, InvA, size_InvA ) ;
+      check_error(err) ; 
+
+      diag_dtrtri (prg, queue, N, uplo, diag, A, offA, InvA, ldA, event);
+
+      if (transA == clblasNoTrans) 
+        {
+          /* the non-transpose case */
+          if (uplo == clblasLower) 
+            {
+              /* the lower case */
+              /* handle the first block separately with alpha */
+
+              int nn = (N % nb == 0) ? nb : (N % nb);
+              i = N-nn;
+              DGEMM_RIGHT(  M, nn, nn, alpha, _(B,0,i), _(InvA,0,i), zero, _(X,0,i) );
+
+              if (i-nb >= 0)
+                {
+                 
+                  DGEMM_RIGHT(  M, i, nn, neg_one, _(X,0,i), _(A,i,0), alpha, _(B,0,0) );
+
+                  /* the rest blocks */
+                  for( i=N-nn-nb; i >= 0; i -= nb ) {
+                    DGEMM_RIGHT(  M, nb, nb, one, _(B,0,i), _(InvA,0,i), zero, _(X,0,i) );
+                    
+                    if (i-nb < 0)
+                      break;
+                    
+                    DGEMM_RIGHT(  M, i, nb, neg_one, _(X,0,i), _(A,i,0), one, _(B,0,0) );
+                  }
+                }
+            }
+          else 
+            {
+              /* the upper case */
+              /* handle the first block separately with alpha */
+              int nn = min(nb, (int)N);
+              DGEMM_RIGHT(  M, nn, nn, alpha, _(B,0,0), _(InvA,0,0), zero, _(X,0,0) );
+
+              if (nb < N)
+                {
+                  
+                  DGEMM_RIGHT(  M, N-nb, nb, neg_one, _(X,0,0), _(A,0,nb), alpha, _(B,0,nb)  );
+                  
+                  /* the rest blocks */
+                  for( i=nb; i < N; i += nb ) {
+                    nn = min(nb, (int)N-i);
+                    DGEMM_RIGHT(  M, nn, nn, one, _(B,0,i), _(InvA,0,i), zero, _(X,0,i) );
+                    
+                    if (i+nb >= N)
+                      break;
+                    
+                    DGEMM_RIGHT(  M, N-i-nb, nb, neg_one, _(X,0,i),   _(A,i,i+nb), one, _(B,0,i+nb)  );
+                  }
+                }
+            }
+        }
+      else 
+        {
+
+          /* the transpose case */
+          if (uplo == clblasLower) 
+            {
+              /* the lower case */
+              /* handle the first block separately with alpha */
+
+              int nn = min(nb, (int)N);
+              DGEMM_RIGHT(  M, nn, nn, alpha, _(B,0,0), _(InvA,0,0), zero, _(X,0,0) );
+
+              if (nb < N) 
+                {
+                  
+                  DGEMM_RIGHT(  M, N-nb, nb, neg_one, _(X,0,0), _(A,nb,0), alpha, _(B,0,nb)  );
+                  
+                  /* the rest blocks */
+                  for( i=nb; i < N; i += nb ) {
+                    nn = min(nb, (int)N-i);
+                    DGEMM_RIGHT(  M, nn, nn, one, _(B,0,i), _(InvA,0,i), zero, _(X,0,i) );
+                    
+                    if (i+nb >= N)
+                      break;
+                    
+                    DGEMM_RIGHT(  M, N-i-nb, nb, neg_one, _(X,0,i),  _(A,nb+i,i), one, _(B,0,i+nb) );
+                  }
+                }
+            }
+          else 
+            {
+              /* the upper case */
+              /* handle the first block separately with alpha */
+              int nn = (N % nb == 0) ? nb : (N % nb);
+              i = N-nn;
+              DGEMM_RIGHT(  M, nn, nn, alpha, _(B,0,i), _(InvA,0,i), zero, _(X,0,i) );
+
+              if (i-nb >= 0)
+                {
+                 
+                  DGEMM_RIGHT(  M, i, nn, neg_one, _(X,0,i), _(A,0,i), alpha, _(B,0,0) );
+
+                  /* the rest blocks */
+                  for( i=N-nn-nb; i >= 0; i -= nb ) {
+                    DGEMM_RIGHT(  M, nb, nb, one, _(B,0,i), _(InvA,0,i), zero, _(X,0,i) );
+
+                    if (i-nb < 0)
+                      break;
+
+                    DGEMM_RIGHT(  M, i, nb, neg_one, _(X,0,i), _(A,0,i), one, _(B,0,0) );
+                  }
+                }
+            }
+        }
+      
+    }
+
+  // Copy X(m,n) to B(m,n)
+  {     
+    size_t src_origin[3] = { 0, 0, 0 } ;
+    size_t dst_origin[3] = { offB*sizeof(double), 0, 0 } ;
+    size_t region[3]     = { M*sizeof(double), N, 1 } ;
+    
+
+    err =  clEnqueueCopyBufferRect( queue,
+                                    X,
+                                    B,
+                                    src_origin,
+                                    dst_origin,
+                                    region,
+                                    ldX*sizeof(double), 0,  
+                                    ldB*sizeof(double), 0,  
+                                    0, NULL,
+                                    event) ;
+    check_error(err) ;         
+
+    clReleaseMemObject(InvA);
+    clReleaseMemObject(X);
+
+  }
+
+  return err;
+
+}
+
+
+
+clblasStatus clblasDtrsmFunctorGpu::execute(Args &args) 
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s\n", "dtrsm_gpu") ;
+
+  
+  cl_program prg = this->m_program;
+  
+
+  err = cl_dtrsm( prg,
+                  queue , 
+                  args.side, 
+                  args.uplo, 
+                  args.transA, 
+                  args.diag, 
+                  args.M, 
+                  args.N,
+                  args.alpha,
+                  args.A, args.offA, args.lda,
+                  args.B, args.offB, args.ldb,
+                  args.events
+                  );
+
+
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  return clblasStatus(err) ;
+}
+
+
+
+clblasDtrsmFunctorGpu * 
+clblasDtrsmFunctorGpu::provide(clblasDtrsmFunctor::Args & args , const char* DevName) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+  Cache::Lookup lookup(cache, ctxt, dev, true) ;
+
+  if ( lookup.ok() ){
+    clblasDtrsmFunctorGpu * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+ 
+  clblasDtrsmFunctorGpu * functor = new clblasDtrsmFunctorGpu(args, err, DevName, bitness);
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+    
+}
+
diff --git a/src/library/blas/functor/hawaii.cc b/src/library/blas/functor/hawaii.cc
new file mode 100644
index 0000000..d7b32de
--- /dev/null
+++ b/src/library/blas/functor/hawaii.cc
@@ -0,0 +1,167 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <hawaii.h>
+#include "gpu_dtrsm.h"
+#include "gcn_dgemmCommon.h"
+#include "math.h"
+//#include "hawaii_dgemmChannelConflict.h"
+#include "hawaii_dgemmSplitKernel.h"
+#include "hawaii_sgemmSplitKernel.h"
+#include "gcn_dgemmSmallMatrices.h"
+#include "gcn_sgemmSmallMatrices.h"
+#include "hawaii_sgemmBranchKernel.h"
+
+FunctorSelectorHawaii FunctorSelectorHawaii::instance ;
+
+
+FunctorSelectorHawaii::FunctorSelectorHawaii()
+    : clblasFunctorSelector(HAWAII)
+{
+    
+}
+
+//
+// The selector function for DGEMM on hawaii 
+//
+//
+clblasDgemmFunctor * FunctorSelectorHawaii::select_dgemm_specific(clblasDgemmFunctor::Args & args)
+{
+#ifdef CLBLAS_HAWAII_DYNAMIC_KERNEL
+	return this->clblasFunctorSelector::select_dgemm_specific(args);
+#else
+  clblasDgemmFunctor * functor;
+
+  bool NN_NT = ((args.transA==clblasNoTrans && args.transB==clblasTrans ) || ( args.transA==clblasNoTrans && args.transB==clblasNoTrans ));
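+  // Notes on the heuristics below: "SmallMatrices" holds when (M/6)*(N/6) < 85*85,
+  // M and N are multiples of 24 (or of 16), K is a multiple of 8 and the case is NT;
+  // "BestPerf" covers shapes expected to map well onto the split kernel (M and N
+  // multiples of 48, or sufficiently large sizes whether aligned to 32/40 or not).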
+  bool SmallMatrices = args.M/6*args.N/6<85*85;
+  SmallMatrices= SmallMatrices && ((args.M%24==0&&args.N%24==0)||(args.M%16==0&&args.N%16==0))&&args.K%8==0 && (args.transA==clblasNoTrans && args.transB==clblasTrans );//*/&&NN_NT;
+  
+  
+  
+  bool BestPerf= (args.M%48==0 && args.N%48==0) ||
+                 (args.M%32==0 && args.M>4000 && args.N%32==0 && args.N>4000) || (args.M%40==0 && args.M>4000 && args.N%40==0 && args.N>4000) ||
+                  ((args.M%32!=0 && args.M>1000) || (args.N%32!=0 && args.N>1000)) || ((args.M%40!=0 && args.M>1000) || (args.N%40!=0 && args.N>1000)) ;
+
+  bool useSplitKernel = (NN_NT &&  BestPerf);
+
+  if (args.alpha!=0)
+  {
+    if (SmallMatrices)
+    {
+      functor = clBlasGCNDgemmSmallMatricesFunctor::provide(args, "Hawaii");
+      if (functor) 
+        return functor;
+    }
+    else if ( useSplitKernel) 
+    {
+      functor = clBlashawaiiDgemmSplitKernelFunctor::provide(args);
+      if (functor) 
+        return functor;
+    }
+
+    functor = clBlasGCNdgemmCommonFunctor::provide(args, "Hawaii");
+    if (functor) 
+      return functor;
+
+
+
+    //{
+    //  functor = clBlashawaiiDgemmChannelConflictFunctor::provide(args);
+    //  if (functor) 
+    //    return functor;
+
+    //}
+  }
+   
+  // else use the fallback implementation
+  return this->clblasFunctorSelector::select_dgemm_specific(args);
+#endif
+}
+
+
+// The selector function for SGEMM on hawaii 
+clblasSgemmFunctor * FunctorSelectorHawaii::select_sgemm_specific(clblasSgemmFunctor::Args & args)
+{
+#ifdef CLBLAS_HAWAII_DYNAMIC_KERNEL
+	return this->clblasFunctorSelector::select_sgemm_specific(args);
+#else
+  // TODO: the logic below is complicated; needs cleanup.
+  clblasSgemmFunctor * functor;
+  bool Not_TT = ((args.transA==clblasNoTrans && args.transB==clblasTrans ) || ( args.transA==clblasNoTrans && args.transB==clblasNoTrans ) || ( args.transA==clblasTrans && args.transB==clblasNoTrans ));
+  bool SmallMatrices = args.M/6*args.N/6<200*200 || ((args.M%64!=0 && args.N%64!=0 && args.M<1900 &&args.N<1900 ) && (args.M%96!=0 && args.N%96!=0 && args.M<1900 &&args.N<1900 ));
+  bool SmallMatricesMod32= (SmallMatrices && (args.M%32==0&&args.N%32==0)) ;
+  SmallMatricesMod32 = SmallMatricesMod32&&Not_TT&&args.K % 16 == 0;
+  //SmallMatrices= false;
+  
+  bool useSplitKernel=((args.M%96==0 && args.N%96==0) || !(args.M%64==0 && args.N%64==0&& args.M<4000 &&args.N<4000)) /*&&args.K%16==0*/;
+  useSplitKernel=useSplitKernel&&Not_TT;
+  
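+  // Selection order: the branch kernel handles small non-mod-32 NT problems with
+  // K a multiple of 16; otherwise (for alpha != 0) the small-matrices kernel, the
+  // split kernel or the generic GCN sgemm functor is chosen, with the base
+  // selector as the final fallback.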
+  // In plain English: a small matrix that is not a multiple of 32, in the NT case, with K a multiple of 16.
+  if (SmallMatrices && (!SmallMatricesMod32) && (args.transA == clblasNoTrans && args.transB == clblasTrans) && (args.K%16 == 0))
+  {
+	  functor = clBlashawaiiSgemmBranchKernelFunctor::provide(args, "Hawaii");
+	  if (functor)
+		  return functor;
+  }
+
+  if (args.alpha!=0 )
+  {
+	  if (SmallMatricesMod32)
+    {
+      functor = clBlasGCNSgemmSmallMatricesFunctor::provide(args, "Hawaii");
+      if (functor) 
+        return functor;
+    }
+    if ( useSplitKernel) 
+    {
+      functor = clBlashawaiiSgemmSplitKernelFunctor::provide(args, "Hawaii");
+      if (functor) 
+        return functor;
+    }
+    else
+    {
+      functor = clblasSgemmFunctorGCN::provide(args, "Hawaii");
+      if (functor) 
+        return functor;
+    }
+  }
+  
+  // else use the fallback implementation
+  return this->clblasFunctorSelector::select_sgemm_specific(args);
+#endif
+}
+
+
+// The selector function for DTRSM on hawaii
+//
+clblasDtrsmFunctor * FunctorSelectorHawaii::select_dtrsm_specific(clblasDtrsmFunctor::Args & args)
+{
+#ifdef CLBLAS_HAWAII_DYNAMIC_KERNEL
+	return this->clblasFunctorSelector::select_dtrsm_specific(args);
+#else
+  clblasDtrsmFunctor * functor;
+  
+  functor = clblasDtrsmFunctorGpu::provide(args, "Hawaii");
+  if (functor) 
+    return functor;
+  
+  // else use the fallback implementation
+  return this->clblasFunctorSelector::select_dtrsm_specific(args);
+#endif
+}
+
+
+
diff --git a/src/library/blas/functor/hawaii_dgemmChannelConflict.cc b/src/library/blas/functor/hawaii_dgemmChannelConflict.cc
new file mode 100644
index 0000000..59a34ad
--- /dev/null
+++ b/src/library/blas/functor/hawaii_dgemmChannelConflict.cc
@@ -0,0 +1,159 @@
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+
+#include "BinaryBuild.h"
+#include "hawaii_dgemmChannelConflict.h"
+
+
+#if BUILD_KERNEL_FROM_STRING
+#include "dgemm_hawaiiChannelConfilct.clT"
+#else 
+
+#include "dgemm_hawaiiChannelConfilct.clHawaii_64.bin.clT"
+#endif
+
+// Just because the full name is too long
+typedef clblasDgemmFunctorGCN::Variant Variant ; 
+
+//typedef clblasFunctorCache<clBlashawaiiDgemmChannelConflictFunctor,const Variant *> Cache ;
+//static Cache cache  ;
+
+// Make it 1 to enable additional debug 'print' 
+#define VERB 0
+
+static const Variant * select_variant_hawaiiChannelConflict( clblasDgemmFunctor::Args & args, cl_uint _64BitsUse )
+{
+  // Return the only variant we have for the moment, as only NT is supported.
+#if BUILD_KERNEL_FROM_STRING
+  static const Variant variant = {"dgemmBlockTempLocalPrefetch", dgemm_NT_ChannelConflict, NULL, NULL, 0, clblasNoTrans, clblasTrans, 256,256,2, {8,8}, {2,4}};
+  return &variant;
+#else
+
+    
+  if(_64BitsUse==64)
+  {
+    static const Variant variant = {"dgemmBlockTempLocalPrefetch", NULL, NULL, dgemm_NT_ChannelConflict_64_bin_Hawaii, sizeof(dgemm_NT_ChannelConflict_64_bin_Hawaii), clblasNoTrans, clblasTrans, 256,256,2, {8,8}, {2,4}};
+    return &variant ; 
+  }
+  else
+  {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+  }
+
+    
+#endif
+  
+  //return NULL;
+}
+
+clBlashawaiiDgemmChannelConflictFunctor::clBlashawaiiDgemmChannelConflictFunctor(Args & args, const Variant * variant, cl_int & err) 
+{
+
+  cl_device_id device;
+  cl_context context;
+  m_program=NULL;
+  m_variant = variant;
+
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variant->kernel_name) ;
+
+  // TODO(Ben): confirm that "kernel_name" is the right key to use here.
+  BinaryLookup bl(context, device, "clBlashawaiiDgemmChannelConflictFunctor");
+
+  bl.variantRaw( this->m_variant->kernel_name, strlen(this->m_variant->kernel_name)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variant->bin != 0 ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      err = bl.buildFromBinary(this->m_variant->bin, this->m_variant->bin_size, this->m_variant->build_options);
+    }
+    else
+    {
+      // directly build from a char* 
+      err = bl.buildFromSource(this->m_variant->source);
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+
+
+clBlashawaiiDgemmChannelConflictFunctor * 
+  clBlashawaiiDgemmChannelConflictFunctor::provide(clblasDgemmFunctor::Args & args) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+
+  const Variant * variant = select_variant_hawaiiChannelConflict( args,  bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+  // For now we only have one variant, but others are in progress, so the code that manages them is kept.
+  //Cache::Lookup lookup(cache, ctxt, dev, variant) ;
+  
+
+  //if ( lookup.ok() )
+  //{
+  //  clBlashawaiiDgemmChannelConflictFunctor * functor = lookup.get();
+  //  functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+  //  return functor;
+  //}
+
+  clBlashawaiiDgemmChannelConflictFunctor * functor = new clBlashawaiiDgemmChannelConflictFunctor(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  //lookup.set(functor) ;
+
+  return functor;
+
+}
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/hawaii_dgemmSplitKernel.cc b/src/library/blas/functor/hawaii_dgemmSplitKernel.cc
new file mode 100644
index 0000000..b25824d
--- /dev/null
+++ b/src/library/blas/functor/hawaii_dgemmSplitKernel.cc
@@ -0,0 +1,670 @@
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+
+#include "BinaryBuild.h"
+#include "hawaii_dgemmSplitKernel.h"
+
+
+#if BUILD_KERNEL_FROM_STRING
+#include "dgemm_hawaiiSplitKernel.clT"
+#else 
+
+#include "dgemm_hawaiiSplitKernel.clHawaii_64.bin.clT"
+#endif
+
+// Just because the full name is too long
+typedef clBlashawaiiDgemmSplitKernelFunctor::Variant Variant ; 
+
+// Define the string name of the source/binary code
+#define DGEMM_SRC_NAME(TA,TB, DIVK, MULT)    dgemm_##TA##TB##_##DIVK##_SPLIT##MULT
+#define DGEMM_SRC_NAME_HAWAII(TA,TB, DIVK, MULT, BITS)   dgemm_##TA##TB##_##DIVK##_SPLIT##MULT##_##BITS##_bin_Hawaii
+
+// Variant name used to tell the different variants apart
+#define DGEMM_VARIANT_NAME(TA,TB, DIVK, MULT) "dgemm_" #TA #TB "_" #DIVK "_SPLIT" #MULT
+//DGEMM_VARIANT_NAME(TA, TB, DIVM , DIVN, DIVK, GREATER48M, GREATER48N, NBKERNEL),    
+
+#define DGEMM_KERNEL_NAME(TA,TB,DIVM,DIVN,DIVK,BS0,BS1,NV0,NV1,MULT, BLOC) "dgemm_"  #TA #TB "_" #DIVM "_" #DIVN "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT "_SPLIT_" #BLOC
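+// For illustration, DGEMM_KERNEL_NAME(N, T, 48, 48, 8, 8, 8, 6, 6, __ALPHABETA, MAIN)
+// expands to "dgemm_NT_48_48_8_8x8_6x6__ALPHABETA_SPLIT_MAIN", and
+// DGEMM_VARIANT_NAME(N, T, 8, __ALPHABETA) to "dgemm_NT_8_SPLIT__ALPHABETA".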
+
+
+#define trans_N clblasNoTrans
+#define trans_T clblasTrans
+
+// Fill a variant descriptor using OpenCL source 
+#define DGEMM_VARIANT_OBJ(TA,TB,DIVK,BS0,BS1,NV0,NV1, BITS, MULT,  \
+  KERNEL_NAME_MAIN, KERNEL_NAME_ROW, KERNEL_NAME_COLUMN, KERNEL_NAME_SINGLE, \
+  KERNELS_SRC,  \
+  KERNEL_BUILD_OPTIONS,  \
+  KERNELS_BIN,  \
+  KERNEL_BIN_SIZE) { \
+  DGEMM_VARIANT_NAME(TA,TB, DIVK, MULT),                                          \
+{ KERNEL_NAME_MAIN, KERNEL_NAME_ROW, KERNEL_NAME_COLUMN, KERNEL_NAME_SINGLE } , \
+  KERNELS_SRC,  \
+  KERNEL_BUILD_OPTIONS, \
+  KERNELS_BIN, \
+  KERNEL_BIN_SIZE, \
+  trans_##TA, trans_##TB,                                       \
+  DIVK ,                                                        \
+{ BS0, BS1 } ,                                                \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
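+
+// Each Variant bundles: the variant name, the four kernel names (MAIN, ROW,
+// COLUMN, SINGLE), the OpenCL source, the build options, the pre-compiled
+// binary and its size, the transpose pair, the K divisor, the work-group size
+// {BS0, BS1}, the per-work-item tile {NV0, NV1} and the multiplier string
+// ("__ALPHA" or "__ALPHABETA").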
+
+typedef clblasFunctorCache<clBlashawaiiDgemmSplitKernelFunctor,const Variant *> CacheSplit ;
+static CacheSplit cachesplit  ;
+
+// Make it 1 to enable additional debug 'print' 
+#define VERB 0
+
+
+
+//static bool applicable( const Variant & var, clblasDgemmFunctor::Args & args, int RefMultiple ) 
+//{
+//#if 0
+//  // Transpose values are tested in select_variant
+//  if ( args.transA != var.transA ) return false ;
+//  if ( args.transB != var.transB ) return false ;
+//#endif
+//
+//  //if (args.N>=var.divN && args.N % var.divN != 0 )
+//  if ( args.N % var.divN != 0 ) 
+//    return false ; 
+//  if ( args.M % var.divM != 0 ) 
+//    return false ; 
+//  if(var.Greater[0]?args.M<RefMultiple:args.M>=RefMultiple)
+//    return false;
+//  if(var.Greater[1]?args.N<RefMultiple:args.N>=RefMultiple)
+//    return false;
+//  if ( args.beta==0 && var.mult.compare("__ALPHA")!=0)
+//    return false ;
+//  return true ;
+//}
+
+
+
+
+static const Variant * select_variant_hawaiiSplitKernel( clblasDgemmFunctor::Args & args, cl_uint _64BitsUse )
+{
+
+
+
+  if ( args.transA == clblasNoTrans ) 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+
+      // ===== dgemm NN ======
+
+      const char* KName_NNMain = DGEMM_KERNEL_NAME(N, N, 48, 48, 8, 8, 8, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NNRow = DGEMM_KERNEL_NAME(N, N, 1, 48, 8, 8, 8, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NNColumn = DGEMM_KERNEL_NAME(N, N, 48, 1, 8, 8, 8, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NNSingleWave = DGEMM_KERNEL_NAME(N, N, 1, 1, 8, 8, 8, 6, 6, __ALPHABETA, SINGLE) ;
+                         
+      const char* KName_NNMainAlpha = DGEMM_KERNEL_NAME(N, N, 48, 48, 8, 8, 8, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NNRowAlpha = DGEMM_KERNEL_NAME(N, N, 1, 48, 8, 8, 8, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NNColumnAlpha = DGEMM_KERNEL_NAME(N, N, 48, 1, 8, 8, 8, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NNSingleWaveAlpha = DGEMM_KERNEL_NAME(N, N, 1, 1, 8, 8, 8, 6, 6, __ALPHA, SINGLE) ;
+                         
+      const char* KName_NNMainK1 = DGEMM_KERNEL_NAME(N, N, 48, 48, 1, 8, 8, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NNRowK1 = DGEMM_KERNEL_NAME(N, N, 1, 48, 1, 8, 8, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NNColumnK1 = DGEMM_KERNEL_NAME(N, N, 48, 1, 1, 8, 8, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NNSingleWaveK1 = DGEMM_KERNEL_NAME(N, N, 1, 1, 1, 8, 8, 6, 6, __ALPHABETA, SINGLE) ;
+                         
+      const char* KName_NNMainK1Alpha = DGEMM_KERNEL_NAME(N, N, 48, 48, 1, 8, 8, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NNRowK1Alpha = DGEMM_KERNEL_NAME(N, N, 1, 48, 1, 8, 8, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NNColumnK1Alpha = DGEMM_KERNEL_NAME(N, N, 48, 1, 1, 8, 8, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NNSingleWaveK1Alpha = DGEMM_KERNEL_NAME(N, N, 1, 1, 1, 8, 8, 6, 6, __ALPHA, SINGLE) ;
+
+#if BUILD_KERNEL_FROM_STRING
+      const char* KSrc_NTMain = DGEMM_SRC_NAME(N, N, 48, 48, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTRow = DGEMM_SRC_NAME(N, N, 1, 48, 8,8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTColumn = DGEMM_SRC_NAME(N, N, 48, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTSingleWave = DGEMM_SRC_NAME(N, N, 1, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+#else
+
+      const char*  KBin_NNMain64 = DGEMM_SRC_NAME_HAWAII(N, N, 8, __ALPHABETA,  64) ;
+      const size_t KBin_NNMainSize64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, N, 8, __ALPHABETA,  64)) ;
+                         
+      const char*  KBin_NNMainAlpha64 = DGEMM_SRC_NAME_HAWAII(N, N, 8, __ALPHA,  64) ;
+      const size_t KBin_NNMainAlphaSize64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, N, 8, __ALPHA,  64)) ;
+                         
+      const char*  KBin_NNMainK164 = DGEMM_SRC_NAME_HAWAII(N, N, 1, __ALPHABETA,  64) ;
+      const size_t KBin_NNMainK1Size64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, N, 1, __ALPHABETA,  64)) ;
+                         
+      const char*  KBin_NNMainK1Alpha64 = DGEMM_SRC_NAME_HAWAII(N, N, 1, __ALPHA,  64) ;
+      const size_t KBin_NNMainK1AlphaSize64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, N, 1, __ALPHA,  64)) ;
+#endif
+      if(args.K%8==0)
+      {
+        if (args.beta!=0)
+        {
+
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,N,8,8,8,6,6,64,__ALPHABETA, 
+              KName_NNMain,KName_NNRow, KName_NNColumn, KName_NNSingleWave ,
+              NULL,
+              NULL,
+              KBin_NNMain64,
+              KBin_NNMainSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+        else
+        {
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,N,8,8,8,6,6,64,__ALPHA,
+              KName_NNMainAlpha,KName_NNRowAlpha, KName_NNColumnAlpha, KName_NNSingleWaveAlpha ,
+              NULL,
+              NULL,
+              KBin_NNMainAlpha64,
+              KBin_NNMainAlphaSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+      }
+      else
+      {
+        if (args.beta!=0)
+        {
+
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,N,1,8,8,6,6,64,__ALPHABETA, 
+              KName_NNMainK1,KName_NNRowK1, KName_NNColumnK1, KName_NNSingleWaveK1 ,
+              NULL,
+              NULL,
+              KBin_NNMainK164,
+              KBin_NNMainK1Size64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+        else
+        {
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,N,1,8,8,6,6,64,__ALPHA,
+              KName_NNMainK1Alpha,KName_NNRowK1Alpha, KName_NNColumnK1Alpha, KName_NNSingleWaveK1Alpha ,
+              NULL,
+              NULL,
+              KBin_NNMainK1Alpha64,
+              KBin_NNMainK1AlphaSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+      }
+
+
+    }
+    if (args.transB == clblasTrans)
+    {
+
+      const char* KName_NTMain = DGEMM_KERNEL_NAME(N, T, 48, 48, 8, 8, 8, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NTRow = DGEMM_KERNEL_NAME(N, T, 1, 48, 8, 8, 8, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NTColumn = DGEMM_KERNEL_NAME(N, T, 48, 1, 8, 8, 8, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NTSingleWave = DGEMM_KERNEL_NAME(N, T, 1, 1, 8, 8, 8, 6, 6, __ALPHABETA, SINGLE) ;
+
+      const char* KName_NTMainAlpha = DGEMM_KERNEL_NAME(N, T, 48, 48, 8, 8, 8, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NTRowAlpha = DGEMM_KERNEL_NAME(N, T, 1, 48, 8, 8, 8, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NTColumnAlpha = DGEMM_KERNEL_NAME(N, T, 48, 1, 8, 8, 8, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NTSingleWaveAlpha = DGEMM_KERNEL_NAME(N, T, 1, 1, 8, 8, 8, 6, 6, __ALPHA, SINGLE) ;
+
+      const char* KName_NTMainK1 = DGEMM_KERNEL_NAME(N, T, 48, 48, 1, 8, 8, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NTRowK1 = DGEMM_KERNEL_NAME(N, T, 1, 48, 1, 8, 8, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NTColumnK1 = DGEMM_KERNEL_NAME(N, T, 48, 1, 1, 8, 8, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NTSingleWaveK1 = DGEMM_KERNEL_NAME(N, T, 1, 1, 1, 8, 8, 6, 6, __ALPHABETA, SINGLE) ;
+
+      const char* KName_NTMainK1Alpha = DGEMM_KERNEL_NAME(N, T, 48, 48, 1, 8, 8, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NTRowK1Alpha = DGEMM_KERNEL_NAME(N, T, 1, 48, 1, 8, 8, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NTColumnK1Alpha = DGEMM_KERNEL_NAME(N, T, 48, 1, 1, 8, 8, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NTSingleWaveK1Alpha = DGEMM_KERNEL_NAME(N, T, 1, 1, 1, 8, 8, 6, 6, __ALPHA, SINGLE) ;
+
+#if BUILD_KERNEL_FROM_STRING
+      const char* KSrc_NTMain = DGEMM_SRC_NAME(N, T, 48, 48, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTRow = DGEMM_SRC_NAME(N, T, 1, 48, 8,8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTColumn = DGEMM_SRC_NAME(N, T, 48, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTSingleWave = DGEMM_SRC_NAME(N, T, 1, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+#else
+
+      const char* KBin_NTMain64 = DGEMM_SRC_NAME_HAWAII(N, T, 8, __ALPHABETA,  64) ;
+      const size_t KBin_NTMainSize64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, T, 8, __ALPHABETA,  64)) ;
+
+      const char* KBin_NTMainAlpha64 = DGEMM_SRC_NAME_HAWAII(N, T, 8, __ALPHA,  64) ;
+      const size_t KBin_NTMainAlphaSize64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, T, 8, __ALPHA,  64)) ;
+
+      const char* KBin_NTMainK164 = DGEMM_SRC_NAME_HAWAII(N, T, 1, __ALPHABETA,  64) ;
+      const size_t KBin_NTMainK1Size64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, T, 1, __ALPHABETA,  64)) ;
+
+      const char* KBin_NTMainK1Alpha64 = DGEMM_SRC_NAME_HAWAII(N, T, 1, __ALPHA,  64) ;
+      const size_t KBin_NTMainK1AlphaSize64 = sizeof(DGEMM_SRC_NAME_HAWAII(N, T, 1, __ALPHA,  64)) ;
+#endif
+
+      // ===== dgemm NT ======
+      if(args.K%8==0)
+      {
+        if (args.beta!=0)
+        {
+
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,T,8,8,8,6,6,64,__ALPHABETA, 
+              KName_NTMain,KName_NTRow, KName_NTColumn, KName_NTSingleWave ,
+              NULL,
+              NULL,
+              KBin_NTMain64,
+              KBin_NTMainSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+        else
+        {
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,T,8,8,8,6,6,64,__ALPHA,
+              KName_NTMainAlpha,KName_NTRowAlpha, KName_NTColumnAlpha, KName_NTSingleWaveAlpha ,
+              NULL,
+              NULL,
+              KBin_NTMainAlpha64,
+              KBin_NTMainAlphaSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+      }
+      else
+      {
+        if (args.beta!=0)
+        {
+
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,T,1,8,8,6,6,64,__ALPHABETA, 
+              KName_NTMainK1,KName_NTRowK1, KName_NTColumnK1, KName_NTSingleWaveK1 ,
+              NULL,
+              NULL,
+              KBin_NTMainK164,
+              KBin_NTMainK1Size64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+        else
+        {
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = DGEMM_VARIANT_OBJ(N,T,1,8,8,6,6,64,__ALPHA,
+              KName_NTMainK1Alpha,KName_NTRowK1Alpha, KName_NTColumnK1Alpha, KName_NTSingleWaveK1Alpha ,
+              NULL,
+              NULL,
+              KBin_NTMainK1Alpha64,
+              KBin_NTMainK1AlphaSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+            return NULL;
+          }   
+        }
+      }
+
+    }
+  }
+
+  return NULL;
+}
+
+clBlashawaiiDgemmSplitKernelFunctor::clBlashawaiiDgemmSplitKernelFunctor(Args & args, const Variant * variant, cl_int & err) 
+{
+
+  cl_device_id device;
+  cl_context context;
+  m_program=NULL;
+  m_variantSplit = variant;
+
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variantSplit->variantName) ;
+
+  // TODO(Ben): confirm that "kernel_name" is the right key to use here.
+  BinaryLookup bl(context, device, "clBlashawaiiDgemmSplitKernelFunctor");
+
+  bl.variantRaw( this->m_variantSplit->variantName, strlen(this->m_variantSplit->variantName)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variantSplit->bin != NULL ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      //only 1 binary containing all the kernel
+      err = bl.buildFromBinary(this->m_variantSplit->bin, this->m_variantSplit->bin_size, /*this->m_variantSplit->build_options[i]*/ "-cl-std=2.0");
+    }
+    else
+    {
+      //// directly build from a char* 
+      //for (int i=0; i<4; i++)
+      //  if(this->m_variantSplit->source[i] != 0)
+      //    err = bl.buildFromSource(this->m_variantSplit->source[i]);
+      if (VERB) printf(" ===> BUILD PROBLEM WE DON'T SUPPORT SOURCE BUILD FOR SPLIT DGEMM\n") ;
+      return;
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+
+
+clBlashawaiiDgemmSplitKernelFunctor * 
+  clBlashawaiiDgemmSplitKernelFunctor::provide(clblasDgemmFunctor::Args & args) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+
+  int major;
+  int minor;
+
+  getCLVersion(dev, major, minor);
+
+  if (major<2)
+    return NULL;
+
+  const Variant * variant = select_variant_hawaiiSplitKernel( args,  bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+  CacheSplit::Lookup lookup(cachesplit, ctxt, dev, variant) ;
+
+
+  if ( lookup.ok() )
+  {
+    clBlashawaiiDgemmSplitKernelFunctor * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+
+  clBlashawaiiDgemmSplitKernelFunctor * functor = new clBlashawaiiDgemmSplitKernelFunctor(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+
+}
+
+
+cl_int clBlashawaiiDgemmSplitKernelFunctor::KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[4], Args &args)
+{
+  size_t GlobalX =args.M/m_variantSplit->bwi[0];
+  GlobalX-=GlobalX%m_variantSplit->ls[0];
+  //
+
+  size_t GlobalY = args.N/m_variantSplit->bwi[1];
+  GlobalY-=GlobalY%m_variantSplit->ls[1];
+
+
+  std::size_t gs[2] = {GlobalX, GlobalY};
+  cl_int error = 0;
+
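+  // Kernel[0] (MAIN) covers the part of C that fits in full 48x48 tiles,
+  // Kernel[1] (ROW) and Kernel[2] (COLUMN) the leftover rows and columns, and
+  // Kernel[3] (SINGLE) the remaining corner; the cases below enqueue only the
+  // kernels that the given (M, N) shape actually needs.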
+  if (args.M%48==0 && args.N%48==0)
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 0 \n") ;
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,args.events);
+    return error;
+  }
+
+  if (args.M%48!=0 && args.N%48!=0 && args.M>=48 && args.N>=48 )
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 0, 1, 2, 3 \n") ;
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+
+    gs[0] = 8;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+
+    gs[1] = 8;
+    gs[0] = GlobalX;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+
+    gs[0] = 8; gs[1] = 8;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+    return error;
+  }
+  if (args.M%48==0 && args.N%48!=0 &&  args.N>48 )
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 0, 2, \n") ;
+
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+    gs[1] = 8;
+	error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, 0, NULL, args.events);
+
+    return error;
+  }
+  if (args.N%48==0 && args.M%48!=0 &&  args.M>48 )
+  {
+        if (VERB) printf(" ===> EXECUTE KERNEL 0, 1 \n") ;
+
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+    gs[0] = 8;
+	error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, 0, NULL, args.events);
+
+    return error;
+  }
+  if(args.M<48 && args.N%48==0)
+  {
+        if (VERB) printf(" ===> EXECUTE KERNEL 1, \n") ;
+
+    gs[0] = 8;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, args.events);
+    return error;
+  }
+  if(args.M<48 && args.N%48!=0 && args.N>=48)
+  {
+        if (VERB) printf(" ===> EXECUTE KERNEL  1, 3 \n") ;
+
+    gs[0] = 8;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, NULL);
+    gs[1] = 8;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+    return error;
+  }
+  if(args.N<48 && args.M%48==0)
+  {
+        if (VERB) printf(" ===> EXECUTE KERNEL  2 \n") ;
+
+    gs[1] = 8;  
+    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, args.events);
+    return error;
+  }
+  if(args.N<48 && args.M%48!=0&& args.M>=48)
+  {
+        if (VERB) printf(" ===> EXECUTE KERNEL 2, 3 \n") ;
+
+    gs[1] = 8;  
+    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, NULL);
+
+    gs[0] = 8;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+    return error;
+  }
+  if (args.N<48 && args.M<48)
+  {
+     if (VERB) printf(" ===> EXECUTE KERNEL  3 \n") ;
+    gs[0] = 8; gs[1] = 8;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls,args.numEventsInWaitList, args.eventWaitList, args.events);
+    return error;
+  }
+
+  return clblasNotImplemented;
+}
+
+
+
+clblasStatus clBlashawaiiDgemmSplitKernelFunctor::execute(Args &args)
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s, alpha =%f ,beta = %f\n", this->m_variantSplit->kernel_name, args.alpha, args.beta) ;
+
+  cl_kernel kernel[4]; 
+  int NBKernel = 0;
+
+  for (int i=0; i<4; i++)
+  {
+    if (this->m_variantSplit->kernel_name[i])
+    {
+      kernel[i ]= clCreateKernel( this->m_program, this->m_variantSplit->kernel_name[i],  &err);
+      if (err != CL_SUCCESS) return clblasStatus(err) ; 
+      NBKernel++;
+    }
+    else
+      break;
+  }
+
+  if (NBKernel != 4) return clblasStatus(clblasBuildProgramFailure) ; 
+
+  if (VERB)
+  {
+    for (int i=0; i<NBKernel; i++)
+      printf(" ===> FOUND %s\n", this->m_variantSplit->kernel_name[i]) ;
+  }
+
+  unsigned int M  = (unsigned int )args.M, N = (unsigned int )args.N, K = (unsigned int )args.K;
+  unsigned int lda = (unsigned int )args.lda, ldb = (unsigned int )args.ldb, ldc = (unsigned int )args.ldc;
+
+  int offsetA = (int)args.offA;
+  int offsetB = (int)args.offB;
+  int offsetC = (int)args.offC;
+
+  int arg[4]={0, 0, 0, 0} ; 
+
+  //// All dgemm kernels shall have the same arguments: (A,B,C,M,N,K,alpha,beta,lda,ldb,ldc,offa,offb,offc) 
+
+  for (int i=0; i<NBKernel; i++)
+  {
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.A);
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.B);
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.C);
+
+    setKernelArg<int>(kernel[i], arg[i]++, M);
+    setKernelArg<int>(kernel[i], arg[i]++, N);
+    setKernelArg<int>(kernel[i], arg[i]++, K);
+
+    setKernelArg<cl_double>(kernel[i], arg[i]++, args.alpha);
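+    // The __ALPHA variants (selected when beta == 0) take no beta argument,
+    // so beta is only set for the __ALPHABETA variants.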
+    if (args.beta!=0 && this->m_variantSplit->mult.compare("__ALPHA")!=0)
+      setKernelArg<cl_double>(kernel[i], arg[i]++, args.beta);
+
+    setKernelArg<int>(kernel[i], arg[i]++, lda);
+    setKernelArg<int>(kernel[i], arg[i]++, ldb);
+    setKernelArg<int>(kernel[i], arg[i]++, ldc);
+
+    setKernelArg<int>(kernel[i], arg[i]++, offsetA);
+    setKernelArg<int>(kernel[i], arg[i]++, offsetB);
+    setKernelArg<int>(kernel[i], arg[i]++, offsetC);
+  }
+
+  err = KernelsLaunch(queue, kernel, args);
+
+
+
+  for (int i = 0; i<NBKernel; i++)
+    clReleaseKernel(kernel[i]) ;
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  return clblasStatus(err) ;
+
+}
+#endif
diff --git a/src/library/blas/functor/hawaii_sgemmBranchKernel.cc b/src/library/blas/functor/hawaii_sgemmBranchKernel.cc
new file mode 100644
index 0000000..158e80b
--- /dev/null
+++ b/src/library/blas/functor/hawaii_sgemmBranchKernel.cc
@@ -0,0 +1,392 @@
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+
+#include "BinaryBuild.h"
+#include "hawaii_sgemmBranchKernel.h"
+
+// Only the non-multiple-of-32 case (small matrices) is implemented right now.
+#if BUILD_KERNEL_FROM_STRING
+#include "sgemm_gcn_SmallMatrices.clT"
+#else 
+#include "sgemm_gcn_SmallMatrices.clHawaii_64.bin.clT"
+#include "sgemm_gcn_SmallMatrices.clBonaire_64.bin.clT"
+#endif
+
+// Just because the full name is too long
+typedef clBlashawaiiSgemmBranchKernelFunctor::Variant Variant;
+
+// Define the string name of the source/binary code
+#define SGEMM_SRC_NAME(TA,TB, DIVK, MULT)    sgemm_##TA##TB##_##DIVK##_SPLIT##MULT
+#define SGEMM_SRC_NAME_HAWAII(TA,TB, DIVK, MULT, BITS)   sgemm_##TA##TB##_##DIVK##_SPLIT##MULT##_##BITS##_bin_Hawaii
+#define SGEMM_SRC_NAME_BONAIRE(TA,TB, DIVK, MULT, BITS)   sgemm_##TA##TB##_##DIVK##_SPLIT##MULT##_##BITS##_bin_Bonaire
+#define SGEMM_SRC_NAME_BIN(TA,TB, DIVK, MULT, BITS, DEVICE)   SGEMM_SRC_NAME##_##DEVICE(TA,TB, DIVK, MULT, BITS)
+
+
+// Variant name used to tell the different variants apart
+#define SGEMM_VARIANT_NAME(TA,TB, DIVK, MULT) "sgemm_" #TA #TB "_" #DIVK "_SPLIT" #MULT
+//SGEMM_VARIANT_NAME(TA, TB, DIVM , DIVN, DIVK, GREATER48M, GREATER48N, NBKERNEL),    
+
+#define SGEMM_KERNEL_NAME(TA,TB,DIVM,DIVN,DIVK,BS0,BS1,NV0,NV1,MULT, BLOC) "sgemm_"  #TA #TB "_" #DIVM "_" #DIVN "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT "_" #BLOC
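+// For illustration, SGEMM_KERNEL_NAME(N, T, 32, 32, 16, 16, 16, 2, 2, __ALPHABETA, BRANCH)
+// expands to "sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH".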
+
+
+#define trans_N clblasNoTrans
+#define trans_T clblasTrans
+
+// Fill a variant descriptor using OpenCL source 
+#define SGEMM_VARIANT_OBJ(TA,TB,DIVK,BS0,BS1,NV0,NV1, BITS, MULT,  \
+  KERNEL_NAME_MAIN, \
+  KERNELS_SRC,  \
+  KERNEL_BUILD_OPTIONS,  \
+  KERNELS_BIN,  \
+  KERNEL_BIN_SIZE) { \
+  SGEMM_VARIANT_NAME(TA,TB, DIVK, MULT),                                          \
+{ KERNEL_NAME_MAIN } , \
+  KERNELS_SRC,  \
+  KERNEL_BUILD_OPTIONS, \
+  KERNELS_BIN, \
+  KERNEL_BIN_SIZE, \
+  trans_##TA, trans_##TB,                                       \
+  DIVK ,                                                        \
+{ BS0, BS1 } ,                                                \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+typedef clblasFunctorCache<clBlashawaiiSgemmBranchKernelFunctor, const Variant *> CacheBranch;
+static CacheBranch cachebranch  ;
+
+// Make it 1 to enable additional debug 'print' 
+#define VERB 0
+
+
+
+//static bool applicable( const Variant & var, clblasSgemmFunctor::Args & args, int RefMultiple ) 
+//{
+//#if 0
+//  // Transpose values are tested in select_variant
+//  if ( args.transA != var.transA ) return false ;
+//  if ( args.transB != var.transB ) return false ;
+//#endif
+//
+//  //if (args.N>=var.divN && args.N % var.divN != 0 )
+//  if ( args.N % var.divN != 0 ) 
+//    return false ; 
+//  if ( args.M % var.divM != 0 ) 
+//    return false ; 
+//  if(var.Greater[0]?args.M<RefMultiple:args.M>=RefMultiple)
+//    return false;
+//  if(var.Greater[1]?args.N<RefMultiple:args.N>=RefMultiple)
+//    return false;
+//  if ( args.beta==0 && var.mult.compare("__ALPHA")!=0)
+//    return false ;
+//  return true ;
+//}
+
+static void to_upper(char* input)
+{
+  while(*input)
+  {
+    *input=toupper(*input);
+    input++;
+  }
+}
+
+
+static const Variant * select_variant_BranchKernel(clblasSgemmFunctor::Args & args, const char* DevName, cl_uint _64BitsUse)
+{
+	if (_64BitsUse != 64)
+	{
+		std::cout << "we don't support clblas on 32 bits" << std::endl;
+		assert(1);
+		return NULL;
+	}
+
+
+	if (args.transA == clblasNoTrans)
+	{
+		if (args.transB == clblasNoTrans)
+		{
+
+			// ===== sgemm NN ======
+			// currently not supported
+			return NULL;
+		}
+		if (args.transB == clblasTrans)
+		{
+			//sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH
+			const char* KName_NT = SGEMM_KERNEL_NAME(N, T, 32, 32, 16, 16, 16, 2, 2, __ALPHABETA, BRANCH);
+
+
+			const char* KBin_NT64;
+			size_t KBin_NTSize64 = 0;
+
+
+#if BUILD_KERNEL_FROM_STRING
+			//currently not supported
+			return NULL;
+#else
+			if (!strcmp(DevName, "Hawaii"))
+			{
+				//KBin_NT64             = SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHABETA,  64, HAWAII) ;
+				KBin_NT64 = sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH_64_bin_Hawaii;
+				KBin_NTSize64 = sizeof(sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH_64_bin_Hawaii);
+
+			}
+			else if (!strcmp(DevName, "Bonaire"))
+			{
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+				KBin_NT64 = sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH_64_bin_Bonaire;
+				KBin_NTSize64 = sizeof(sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH_64_bin_Bonaire);
+#endif
+
+			}
+#endif
+
+			// ===== SGEMM NT ======
+			static const Variant variant = SGEMM_VARIANT_OBJ(N, T, 16, 16, 16, 2, 2, 64, __ALPHABETA,
+				KName_NT,
+				NULL,
+				NULL,
+				KBin_NT64,
+				KBin_NTSize64);
+
+			return &variant;
+		}
+		else
+		{
+			if (args.transB == clblasNoTrans)
+			{
+
+				// ===== sgemm TN ======
+				// currently not supported
+				return NULL;
+			}
+		}
+
+		return NULL;
+	}
+
+	return NULL;
+}
+
+clBlashawaiiSgemmBranchKernelFunctor::clBlashawaiiSgemmBranchKernelFunctor(Args & args, const Variant * variant, cl_int & err)
+{
+
+  cl_device_id device;
+  cl_context context;
+  m_program=NULL;
+  m_variantBranch = variant;
+
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variantBranch->variantName) ;
+
+  // TODO(Ben): confirm that "kernel_name" is the right key to use here.
+  BinaryLookup bl(context, device, "clBlashawaiiSgemmBranchKernelFunctor");
+
+  bl.variantRaw( this->m_variantBranch->variantName, strlen(this->m_variantBranch->variantName)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variantBranch->bin != NULL ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      //only 1 binary containing all the kernel
+      err = bl.buildFromBinary(this->m_variantBranch->bin, this->m_variantBranch->bin_size, /*this->m_variantBranch->build_options[i]*/ "-cl-std=2.0");
+    }
+    else
+    {
+      //// directly build from a char* 
+      //for (int i=0; i<4; i++)
+      //  if(this->m_variantBranch->source[i] != 0)
+      //    err = bl.buildFromSource(this->m_variantSplit->source[i]);
+      if (VERB) printf(" ===> BUILD PROBLEM WE DON'T SUPPORT SOURCE BUILD FOR Branch SGEMM\n") ;
+      return;
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+
+
+clBlashawaiiSgemmBranchKernelFunctor * 
+  clBlashawaiiSgemmBranchKernelFunctor::provide(clblasSgemmFunctor::Args & args, char* DevName) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+
+  int major;
+  int minor;
+
+  getCLVersion(dev, major, minor);
+
+  if (major<2)
+    return NULL;
+
+  // to_upper( DevName);
+  const Variant * variant = select_variant_BranchKernel( args, DevName, bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+  CacheBranch::Lookup lookup(cachebranch, ctxt, dev, variant) ;
+
+
+  if ( lookup.ok() )
+  {
+    clBlashawaiiSgemmBranchKernelFunctor * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+
+  clBlashawaiiSgemmBranchKernelFunctor * functor = new clBlashawaiiSgemmBranchKernelFunctor(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+
+}
+
+
+cl_int clBlashawaiiSgemmBranchKernelFunctor::KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[1], Args &args)
+{
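+  // Round the global sizes up so that the single branching kernel covers the
+  // whole matrix: each 16x16 work-group computes a 32x32 tile (2x2 values per
+  // work-item), with out-of-range elements presumably guarded inside the
+  // kernel (hence the BRANCH suffix).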
+  // ((Mvalue - 1) / 32 + 1) * 16
+  size_t GlobalX = ((args.M-1) /(m_variantBranch->bwi[0]*m_variantBranch->ls[0]) + 1)*16 ;
+  
+  //
+
+  size_t GlobalY = ((args.N - 1) / (m_variantBranch->bwi[1] * m_variantBranch->ls[1]) + 1) * 16;
+
+
+  std::size_t gs[2] = {GlobalX, GlobalY};
+  cl_int error = 0;
+
+
+  if (VERB) printf(" ===> EXECUTE KERNEL 0 \n") ;
+  error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantBranch->ls, args.numEventsInWaitList, args.eventWaitList,args.events);
+  return error;
+}
+
+
+
+clblasStatus clBlashawaiiSgemmBranchKernelFunctor::execute(Args &args)
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s, alpha =%f ,beta = %f\n", this->m_variantBranch->kernel_name, args.alpha, args.beta) ;
+
+  cl_kernel kernel[1]; 
+  int NBKernel = 0;
+
+
+  if (this->m_variantBranch->kernel_name[0])
+  {
+    kernel[0]= clCreateKernel( this->m_program, this->m_variantBranch->kernel_name[0],  &err);
+    if (err != CL_SUCCESS)
+        return clblasStatus(err) ; 
+    NBKernel++;
+  }
+
+
+  if (NBKernel != 1) return clblasStatus(clblasBuildProgramFailure) ; 
+
+  if (VERB)
+  {
+    for (int i=0; i<NBKernel; i++)
+      printf(" ===> FOUND %s\n", this->m_variantBranch->kernel_name[i]) ;
+  }
+
+  int M   = args.M, N = args.N, K = args.K;
+  int lda = args.lda, ldb = args.ldb, ldc = args.ldc;
+
+  int offsetA = args.offA;
+  int offsetB = args.offB;
+  int offsetC = args.offC;
+
+  int arg[4]={0, 0, 0, 0} ; 
+
+  //// All sgemm kernels shall have the same arguments: (A,B,C,M,N,K,alpha,beta,lda,ldb,ldc,offa,offb,offc) 
+
+  for (int i=0; i<NBKernel; i++)
+  {
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.A);
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.B);
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.C);
+
+    setKernelArg<int>(kernel[i], arg[i]++, M);
+    setKernelArg<int>(kernel[i], arg[i]++, N);
+    setKernelArg<int>(kernel[i], arg[i]++, K);
+
+    setKernelArg<cl_float>(kernel[i], arg[i]++, args.alpha);
+    if (args.beta!=0 && this->m_variantBranch->mult.compare("__ALPHA")!=0)
+      setKernelArg<cl_float>(kernel[i], arg[i]++, args.beta);
+
+    setKernelArg<int>(kernel[i], arg[i]++, lda);
+    setKernelArg<int>(kernel[i], arg[i]++, ldb);
+    setKernelArg<int>(kernel[i], arg[i]++, ldc);
+
+    setKernelArg<int>(kernel[i], arg[i]++, offsetA);
+    setKernelArg<int>(kernel[i], arg[i]++, offsetB);
+    setKernelArg<int>(kernel[i], arg[i]++, offsetC);
+  }
+
+  err = KernelsLaunch(queue, kernel, args);
+
+
+
+  for (int i = 0; i<NBKernel; i++)
+    clReleaseKernel(kernel[i]) ;
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  // err= clFinish(queue);
+  return clblasStatus(err) ;
+
+}
+#endif
diff --git a/src/library/blas/functor/hawaii_sgemmSplitKernel.cc b/src/library/blas/functor/hawaii_sgemmSplitKernel.cc
new file mode 100644
index 0000000..23828dd
--- /dev/null
+++ b/src/library/blas/functor/hawaii_sgemmSplitKernel.cc
@@ -0,0 +1,834 @@
+#ifndef CLBLAS_HAWAII_DYNAMIC_KERNEL
+
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <devinfo.h>
+#include "clblas-internal.h"
+#include "solution_seq.h"
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#include <functor_xgemm.h>
+#include <tahiti.h>
+#include <hawaii.h>
+
+#include "BinaryBuild.h"
+#include "hawaii_sgemmSplitKernel.h"
+
+
+#if BUILD_KERNEL_FROM_STRING
+#include "sgemm_hawaiiSplitKernel.clT"
+#else 
+#include "sgemm_hawaiiSplitKernel.clHawaii_64.bin.clT"
+#include "sgemm_hawaiiSplitKernel.clBonaire_64.bin.clT"
+#endif
+
+// Just because the full name is too long
+typedef clBlashawaiiSgemmSplitKernelFunctor::Variant Variant ; 
+
+// Define the string name of the source/binary code
+#define SGEMM_SRC_NAME(TA,TB, DIVK, MULT)    sgemm_##TA##TB##_##DIVK##_SPLIT##MULT
+#define SGEMM_SRC_NAME_HAWAII(TA,TB, DIVK, MULT, BITS)   sgemm_##TA##TB##_##DIVK##_SPLIT##MULT##_##BITS##_bin_Hawaii
+#define SGEMM_SRC_NAME_BONAIRE(TA,TB, DIVK, MULT, BITS)   sgemm_##TA##TB##_##DIVK##_SPLIT##MULT##_##BITS##_bin_Bonaire
+#define SGEMM_SRC_NAME_BIN(TA,TB, DIVK, MULT, BITS, DEVICE)   SGEMM_SRC_NAME##_##DEVICE(TA,TB, DIVK, MULT, BITS)
+
+
+// Variant name used to tell the different variants apart
+#define SGEMM_VARIANT_NAME(TA,TB, DIVK, MULT) "sgemm_" #TA #TB "_" #DIVK "_SPLIT" #MULT
+//SGEMM_VARIANT_NAME(TA, TB, DIVM , DIVN, DIVK, GREATER48M, GREATER48N, NBKERNEL),    
+
+#define SGEMM_KERNEL_NAME(TA,TB,DIVM,DIVN,DIVK,BS0,BS1,NV0,NV1,MULT, BLOC) "sgemm_"  #TA #TB "_" #DIVM "_" #DIVN "_" #DIVK "_" #BS0 "x" #BS1 "_" #NV0 "x" #NV1 #MULT "_SPLIT_" #BLOC
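+// For illustration, SGEMM_KERNEL_NAME(N, T, 96, 96, 16, 16, 16, 6, 6, __ALPHABETA, MAIN)
+// expands to "sgemm_NT_96_96_16_16x16_6x6__ALPHABETA_SPLIT_MAIN".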
+
+
+#define trans_N clblasNoTrans
+#define trans_T clblasTrans
+
+// Fill a variant descriptor using OpenCL source 
+#define SGEMM_VARIANT_OBJ(TA,TB,DIVK,BS0,BS1,NV0,NV1, BITS, MULT,  \
+  KERNEL_NAME_MAIN, KERNEL_NAME_ROW, KERNEL_NAME_COLUMN, KERNEL_NAME_SINGLE, \
+  KERNELS_SRC,  \
+  KERNEL_BUILD_OPTIONS,  \
+  KERNELS_BIN,  \
+  KERNEL_BIN_SIZE) { \
+  SGEMM_VARIANT_NAME(TA,TB, DIVK, MULT),                                          \
+{ KERNEL_NAME_MAIN, KERNEL_NAME_ROW, KERNEL_NAME_COLUMN, KERNEL_NAME_SINGLE } , \
+  KERNELS_SRC,  \
+  KERNEL_BUILD_OPTIONS, \
+  KERNELS_BIN, \
+  KERNEL_BIN_SIZE, \
+  trans_##TA, trans_##TB,                                       \
+  DIVK ,                                                        \
+{ BS0, BS1 } ,                                                \
+{ NV0, NV1 } ,                                                      \
+#MULT                                                               \
+} 
+
+typedef clblasFunctorCache<clBlashawaiiSgemmSplitKernelFunctor,const Variant *> CacheSplit ;
+static CacheSplit cachesplit  ;
+
+// Make it 1 to enable additional debug 'print' 
+#define VERB 0
+
+
+
+//static bool applicable( const Variant & var, clblasSgemmFunctor::Args & args, int RefMultiple ) 
+//{
+//#if 0
+//  // Transpose values are tested in select_variant
+//  if ( args.transA != var.transA ) return false ;
+//  if ( args.transB != var.transB ) return false ;
+//#endif
+//
+//  //if (args.N>=var.divN && args.N % var.divN != 0 )
+//  if ( args.N % var.divN != 0 ) 
+//    return false ; 
+//  if ( args.M % var.divM != 0 ) 
+//    return false ; 
+//  if(var.Greater[0]?args.M<RefMultiple:args.M>=RefMultiple)
+//    return false;
+//  if(var.Greater[1]?args.N<RefMultiple:args.N>=RefMultiple)
+//    return false;
+//  if ( args.beta==0 && var.mult.compare("__ALPHA")!=0)
+//    return false ;
+//  return true ;
+//}
+
+static void to_upper(char* input)
+{
+  while(*input)
+  {
+    *input=toupper(*input);
+    input++;
+  }
+}
+
+
+static const Variant * select_variant_SplitKernel( clblasSgemmFunctor::Args & args, const char* DevName, cl_uint _64BitsUse )
+{
+  if(_64BitsUse!=64)
+  {
+    std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+    assert(1);
+    return NULL;
+  }
+
+
+  if ( args.transA == clblasNoTrans ) 
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+
+      // ===== sgemm NN ======
+      // return NULL;
+      const char* KName_NNMain = SGEMM_KERNEL_NAME(N, N, 96, 96, 16, 16, 16, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NNRow = SGEMM_KERNEL_NAME(N, N, 1, 96, 16, 16, 16, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NNColumn = SGEMM_KERNEL_NAME(N, N, 96, 1, 16, 16, 16, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NNSingleWave = SGEMM_KERNEL_NAME(N, N, 1, 1, 16, 16, 16, 6, 6, __ALPHABETA, SINGLE) ;
+
+      const char* KName_NNMainAlpha = SGEMM_KERNEL_NAME(N, N, 96, 96, 16, 16, 16, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NNRowAlpha = SGEMM_KERNEL_NAME(N, N, 1, 96, 16, 16, 16, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NNColumnAlpha = SGEMM_KERNEL_NAME(N, N, 96, 1, 16, 16, 16, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NNSingleWaveAlpha = SGEMM_KERNEL_NAME(N, N, 1, 1, 16, 16, 16, 6, 6, __ALPHA, SINGLE) ;
+
+      const char* KName_NNMainK1 = SGEMM_KERNEL_NAME(N, N, 96, 96, 1, 16, 16, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NNRowK1 = SGEMM_KERNEL_NAME(N, N, 1, 96, 1, 16, 16, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NNColumnK1 = SGEMM_KERNEL_NAME(N, N, 96, 1, 1, 16, 16, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NNSingleWaveK1 = SGEMM_KERNEL_NAME(N, N, 1, 1, 1, 16, 16, 6, 6, __ALPHABETA, SINGLE) ;
+
+      const char* KName_NNMainK1Alpha = SGEMM_KERNEL_NAME(N, N, 96, 96, 1, 16, 16, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NNRowK1Alpha = SGEMM_KERNEL_NAME(N, N, 1, 96, 1, 16, 16, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NNColumnK1Alpha = SGEMM_KERNEL_NAME(N, N, 96, 1, 1, 16, 16, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NNSingleWaveK1Alpha = SGEMM_KERNEL_NAME(N, N, 1, 1, 1, 16, 16, 6, 6, __ALPHA, SINGLE) ;
+
+#if BUILD_KERNEL_FROM_STRING
+      const char* KSrc_NTMain = SGEMM_SRC_NAME(N, N, 48, 48, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTRow = SGEMM_SRC_NAME(N, N, 1, 48, 8,8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTColumn = SGEMM_SRC_NAME(N, N, 48, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTSingleWave = SGEMM_SRC_NAME(N, N, 1, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+#else
+
+      const char* KBin_NNMain64 ;           
+      size_t KBin_NNMainSize64 = 0;       
+
+      const char* KBin_NNMainAlpha64  ;     
+      size_t KBin_NNMainAlphaSize64 = 0;    
+
+      const char* KBin_NNMainK164   ;       
+      size_t KBin_NNMainK1Size64 = 0;       
+
+      const char* KBin_NNMainK1Alpha64 ;    
+      size_t KBin_NNMainK1AlphaSize64 = 0;  
+      if (!strcmp(DevName, "Hawaii"))
+      {
+        KBin_NNMain64             = SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHABETA,  64, HAWAII) ;
+        KBin_NNMainSize64        = sizeof(SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHABETA,  64, HAWAII)) ;
+
+        KBin_NNMainAlpha64        = SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHA,  64, HAWAII) ;
+        KBin_NNMainAlphaSize64   = sizeof(SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHA,  64, HAWAII)) ;
+
+        KBin_NNMainK164           = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, HAWAII) ;
+        KBin_NNMainK1Size64      = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, HAWAII)) ;
+
+        KBin_NNMainK1Alpha64      = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, HAWAII) ;
+        KBin_NNMainK1AlphaSize64 = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, HAWAII)) ;
+      }
+      else if (!strcmp(DevName, "Bonaire"))
+      {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        KBin_NNMain64             = SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHABETA,  64, BONAIRE) ;
+        KBin_NNMainSize64        = sizeof(SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHABETA,  64, BONAIRE)) ;
+
+        KBin_NNMainAlpha64        = SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHA,  64, BONAIRE) ;
+        KBin_NNMainAlphaSize64   = sizeof(SGEMM_SRC_NAME_BIN(N, N, 16, __ALPHA,  64, BONAIRE)) ;
+
+        KBin_NNMainK164           = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, BONAIRE) ;
+        KBin_NNMainK1Size64      = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, BONAIRE)) ;
+
+        KBin_NNMainK1Alpha64      = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, BONAIRE) ;
+        KBin_NNMainK1AlphaSize64 = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, BONAIRE)) ;
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+      }
+#endif
+      if(args.K%16==0)
+      {
+        if (args.beta!=0)
+        {
+
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = SGEMM_VARIANT_OBJ(N,N,16,16,16,6,6,64,__ALPHABETA, 
+              KName_NNMain,KName_NNRow, KName_NNColumn, KName_NNSingleWave ,
+              NULL,
+              NULL,
+              KBin_NNMain64,
+              KBin_NNMainSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+          }   
+        }
+        else
+        {
+            static const Variant variant = SGEMM_VARIANT_OBJ(N,N,16,16,16,6,6,64,__ALPHA,
+              KName_NNMainAlpha,KName_NNRowAlpha, KName_NNColumnAlpha, KName_NNSingleWaveAlpha ,
+              NULL,
+              NULL,
+              KBin_NNMainAlpha64,
+              KBin_NNMainAlphaSize64) ;
+
+            return &variant ; 
+        }
+      }
+      else
+      {
+        if (args.beta!=0)
+        {
+
+            static const Variant variant = SGEMM_VARIANT_OBJ(N,N,1,16,16,6,6,64,__ALPHABETA, 
+              KName_NNMainK1,KName_NNRowK1, KName_NNColumnK1, KName_NNSingleWaveK1 ,
+              NULL,
+              NULL,
+              KBin_NNMainK164,
+              KBin_NNMainK1Size64) ;
+
+            return &variant ; 
+        }
+        else
+        {
+            static const Variant variant = SGEMM_VARIANT_OBJ(N,N,1,16,16,6,6,64,__ALPHA,
+              KName_NNMainK1Alpha,KName_NNRowK1Alpha, KName_NNColumnK1Alpha, KName_NNSingleWaveK1Alpha ,
+              NULL,
+              NULL,
+              KBin_NNMainK1Alpha64,
+              KBin_NNMainK1AlphaSize64) ;
+
+            return &variant ; 
+        }
+      }
+
+
+    }
+    if (args.transB == clblasTrans)
+    {
+
+      const char* KName_NTMain = SGEMM_KERNEL_NAME(N, T, 96, 96, 16, 16, 16, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NTRow = SGEMM_KERNEL_NAME(N, T, 1, 96, 16, 16, 16, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NTColumn = SGEMM_KERNEL_NAME(N, T, 96, 1, 16, 16, 16, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NTSingleWave = SGEMM_KERNEL_NAME(N, T, 1, 1, 16, 16, 16, 6, 6, __ALPHABETA, SINGLE) ;
+
+      const char* KName_NTMainAlpha = SGEMM_KERNEL_NAME(N, T, 96, 96, 16, 16, 16, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NTRowAlpha = SGEMM_KERNEL_NAME(N, T, 1, 96, 16, 16, 16, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NTColumnAlpha = SGEMM_KERNEL_NAME(N, T, 96, 1, 16, 16, 16, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NTSingleWaveAlpha = SGEMM_KERNEL_NAME(N, T, 1, 1, 16, 16, 16, 6, 6, __ALPHA, SINGLE) ;
+
+      const char* KName_NTMainK1 = SGEMM_KERNEL_NAME(N, T, 96, 96, 1, 16, 16, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_NTRowK1 = SGEMM_KERNEL_NAME(N, T, 1, 96, 1, 16, 16, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_NTColumnK1 = SGEMM_KERNEL_NAME(N, T, 96, 1, 1, 16, 16, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_NTSingleWaveK1 = SGEMM_KERNEL_NAME(N, T, 1, 1, 1, 16, 16, 6, 6, __ALPHABETA, SINGLE) ;
+
+      const char* KName_NTMainK1Alpha = SGEMM_KERNEL_NAME(N, T, 96, 96, 1, 16, 16, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_NTRowK1Alpha = SGEMM_KERNEL_NAME(N, T, 1, 96, 1, 16, 16, 6, 6, __ALPHA, ROW) ;
+      const char* KName_NTColumnK1Alpha = SGEMM_KERNEL_NAME(N, T, 96, 1, 1, 16, 16, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_NTSingleWaveK1Alpha = SGEMM_KERNEL_NAME(N, T, 1, 1, 1, 16, 16, 6, 6, __ALPHA, SINGLE) ;
+
+      const char* KBin_NTMain64 = NULL;
+      size_t KBin_NTMainSize64 = 0;
+
+      const char* KBin_NTMainAlpha64 = NULL;
+      size_t KBin_NTMainAlphaSize64 = 0;
+
+      const char* KBin_NTMainK164 = NULL;
+      size_t KBin_NTMainK1Size64 = 0;
+
+      const char* KBin_NTMainK1Alpha64 = NULL;
+      size_t KBin_NTMainK1AlphaSize64 = 0;
+
+#if BUILD_KERNEL_FROM_STRING
+      const char* KSrc_NTMain = SGEMM_SRC_NAME(N, T, 96, 96, 16, 16, 16, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTRow = SGEMM_SRC_NAME(N, T, 1, 96, 16,16, 16, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTColumn = SGEMM_SRC_NAME(N, T, 96, 1, 16, 16, 16, 6, 6, __ALPHABETA) ;
+      const char* KSrc_NTSingleWave = SGEMM_SRC_NAME(N, T, 1, 1, 16, 16, 16, 6, 6, __ALPHABETA) ;
+#else
+      if (!strcmp(DevName, "Hawaii"))
+      {
+        KBin_NTMain64             = SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHABETA,  64, HAWAII) ;
+        KBin_NTMainSize64        = sizeof(SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHABETA,  64, HAWAII)) ;
+
+        KBin_NTMainAlpha64        = SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHA,  64, HAWAII) ;
+        KBin_NTMainAlphaSize64   = sizeof(SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHA,  64, HAWAII)) ;
+
+        KBin_NTMainK164           = SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHABETA,  64, HAWAII) ;
+        KBin_NTMainK1Size64      = sizeof(SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHABETA,  64, HAWAII)) ;
+
+        KBin_NTMainK1Alpha64      = SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHA,  64, HAWAII) ;
+        KBin_NTMainK1AlphaSize64 = sizeof(SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHA,  64, HAWAII)) ;
+      }
+      else if (!strcmp(DevName, "Bonaire"))
+      {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        KBin_NTMain64             = SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHABETA,  64, BONAIRE) ;
+        KBin_NTMainSize64        = sizeof(SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHABETA,  64, BONAIRE)) ;
+
+        KBin_NTMainAlpha64        = SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHA,  64, BONAIRE) ;
+        KBin_NTMainAlphaSize64   = sizeof(SGEMM_SRC_NAME_BIN(N, T, 16, __ALPHA,  64, BONAIRE)) ;
+
+        KBin_NTMainK164           = SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHABETA,  64, BONAIRE) ;
+        KBin_NTMainK1Size64      = sizeof(SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHABETA,  64, BONAIRE)) ;
+
+        KBin_NTMainK1Alpha64      = SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHA,  64, BONAIRE) ;
+        KBin_NTMainK1AlphaSize64 = sizeof(SGEMM_SRC_NAME_BIN(N, T, 1, __ALPHA,  64, BONAIRE)) ;
+#endif //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+      }
+#endif
+
+      // ===== SGEMM NT ======
+      if(args.K%16==0)
+      {
+        if (args.beta!=0)
+        {
+
+          static const Variant variant = SGEMM_VARIANT_OBJ(N,T,16,16,16,6,6,64,__ALPHABETA, 
+            KName_NTMain,KName_NTRow, KName_NTColumn, KName_NTSingleWave ,
+            NULL,
+            NULL,
+            KBin_NTMain64,
+            KBin_NTMainSize64) ;
+
+          return &variant ; 
+        }
+        else
+        {
+          static const Variant variant = SGEMM_VARIANT_OBJ(N,T,16,16,16,6,6,64,__ALPHA,
+            KName_NTMainAlpha,KName_NTRowAlpha, KName_NTColumnAlpha, KName_NTSingleWaveAlpha ,
+            NULL,
+            NULL,
+            KBin_NTMainAlpha64,
+            KBin_NTMainAlphaSize64) ;
+
+          return &variant ; 
+        }
+      }
+      else
+      {
+        if (args.beta!=0)
+        {
+
+          static const Variant variant = SGEMM_VARIANT_OBJ(N,T,1,16,16,6,6,64,__ALPHABETA, 
+            KName_NTMainK1,KName_NTRowK1, KName_NTColumnK1, KName_NTSingleWaveK1 ,
+            NULL,
+            NULL,
+            KBin_NTMainK164,
+            KBin_NTMainK1Size64) ;
+
+          return &variant ; 
+        }
+        else
+        {
+          static const Variant variant = SGEMM_VARIANT_OBJ(N,T,1,16,16,6,6,64,__ALPHA,
+            KName_NTMainK1Alpha,KName_NTRowK1Alpha, KName_NTColumnK1Alpha, KName_NTSingleWaveK1Alpha ,
+            NULL,
+            NULL,
+            KBin_NTMainK1Alpha64,
+            KBin_NTMainK1AlphaSize64) ;
+
+          return &variant ; 
+        }
+      }
+
+    }
+  }
+  else
+  {
+    if ( args.transB == clblasNoTrans ) 
+    {
+
+      // ===== sgemm TN ======
+      // return NULL;
+      const char* KName_TNMain = SGEMM_KERNEL_NAME(T, N, 96, 96, 16, 16, 16, 6, 6, __ALPHABETA, MAIN) ;
+      const char* KName_TNRow = SGEMM_KERNEL_NAME(T, N, 1, 96, 16, 16, 16, 6, 6, __ALPHABETA, ROW) ;
+      const char* KName_TNColumn = SGEMM_KERNEL_NAME(T, N, 96, 1, 16, 16, 16, 6, 6, __ALPHABETA, COLUMN) ;
+      const char* KName_TNSingleWave = SGEMM_KERNEL_NAME(T, N, 1, 1, 16, 16, 16, 6, 6, __ALPHABETA, SINGLE) ;
+
+      const char* KName_TNMainAlpha = SGEMM_KERNEL_NAME(T, N, 96, 96, 16, 16, 16, 6, 6, __ALPHA, MAIN) ;
+      const char* KName_TNRowAlpha = SGEMM_KERNEL_NAME(T, N, 1, 96, 16, 16, 16, 6, 6, __ALPHA, ROW) ;
+      const char* KName_TNColumnAlpha = SGEMM_KERNEL_NAME(T, N, 96, 1, 16, 16, 16, 6, 6, __ALPHA, COLUMN) ;
+      const char* KName_TNSingleWaveAlpha = SGEMM_KERNEL_NAME(T, N, 1, 1, 16, 16, 16, 6, 6, __ALPHA, SINGLE) ;
+
+      //const char* KName_TNMainK1 = SGEMM_KERNEL_NAME(N, N, 96, 96, 1, 16, 16, 6, 6, __ALPHABETA, MAIN) ;
+      //const char* KName_TNRowK1 = SGEMM_KERNEL_NAME(N, N, 1, 96, 1, 16, 16, 6, 6, __ALPHABETA, ROW) ;
+      //const char* KName_TNColumnK1 = SGEMM_KERNEL_NAME(N, N, 96, 1, 1, 16, 16, 6, 6, __ALPHABETA, COLUMN) ;
+      //const char* KName_TNSingleWaveK1 = SGEMM_KERNEL_NAME(N, N, 1, 1, 1, 16, 16, 6, 6, __ALPHABETA, SINGLE) ;
+      //                   
+      //const char* KName_TNMainK1Alpha = SGEMM_KERNEL_NAME(N, N, 96, 96, 1, 16, 16, 6, 6, __ALPHA, MAIN) ;
+      //const char* KName_TNRowK1Alpha = SGEMM_KERNEL_NAME(N, N, 1, 96, 1, 16, 16, 6, 6, __ALPHA, ROW) ;
+      //const char* KName_TNColumnK1Alpha = SGEMM_KERNEL_NAME(N, N, 96, 1, 1, 16, 16, 6, 6, __ALPHA, COLUMN) ;
+      //const char* KName_TNSingleWaveK1Alpha = SGEMM_KERNEL_NAME(N, N, 1, 1, 1, 16, 16, 6, 6, __ALPHA, SINGLE) ;
+
+#if BUILD_KERNEL_FROM_STRING
+      const char* KSrc_TNMain = SGEMM_SRC_NAME(T, N, 48, 48, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_TNRow = SGEMM_SRC_NAME(T, N, 1, 48, 8,8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_TNColumn = SGEMM_SRC_NAME(T, N, 48, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+      const char* KSrc_TNSingleWave = SGEMM_SRC_NAME(T, N, 1, 1, 8, 8, 8, 6, 6, __ALPHABETA) ;
+#else
+
+      const char* KBin_TNMain64 = NULL;
+      size_t KBin_TNMainSize64 = 0;
+
+      const char* KBin_TNMainAlpha64 = NULL;
+      size_t KBin_TNMainAlphaSize64 = 0;
+
+      //const char* KBin_NNMainK164   ;       
+      //size_t KBin_NNMainK1Size64 = 0;       
+
+      //const char* KBin_NNMainK1Alpha64 ;    
+      //size_t KBin_NNMainK1AlphaSize64 = 0;  
+      if (!strcmp(DevName, "Hawaii"))
+      {
+        KBin_TNMain64             = SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHABETA,  64, HAWAII) ;
+        KBin_TNMainSize64        = sizeof(SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHABETA,  64, HAWAII)) ;
+             
+        KBin_TNMainAlpha64        = SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHA,  64, HAWAII) ;
+        KBin_TNMainAlphaSize64   = sizeof(SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHA,  64, HAWAII)) ;
+
+        //KBin_NNMainK164           = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, HAWAII) ;
+        //KBin_NNMainK1Size64      = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, HAWAII)) ;
+
+        //KBin_NNMainK1Alpha64      = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, HAWAII) ;
+        //KBin_NNMainK1AlphaSize64 = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, HAWAII)) ;
+      }
+      else if (!strcmp(DevName, "Bonaire"))
+      {
+#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+        KBin_TNMain64             = SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHABETA,  64, BONAIRE) ;
+        KBin_TNMainSize64         = sizeof(SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHABETA,  64, BONAIRE)) ;
+             
+        KBin_TNMainAlpha64        = SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHA,  64, BONAIRE) ;
+        KBin_TNMainAlphaSize64    = sizeof(SGEMM_SRC_NAME_BIN(T, N, 16, __ALPHA,  64, BONAIRE)) ;
+
+        //KBin_NNMainK164           = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, BONAIRE) ;
+        //KBin_NNMainK1Size64      = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHABETA,  64, BONAIRE)) ;
+
+        //KBin_NNMainK1Alpha64      = SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, BONAIRE) ;
+        //KBin_NNMainK1AlphaSize64 = sizeof(SGEMM_SRC_NAME_BIN(N, N, 1, __ALPHA,  64, BONAIRE)) ;
+#endif  //#ifndef CLBLAS_BONAIRE_DYNAMIC_KERNEL
+      }
+#endif
+      if(args.K%16==0)
+      {
+        if (args.beta!=0)
+        {
+
+          
+            static const Variant variant = SGEMM_VARIANT_OBJ(T,N,16,16,16,6,6,64,__ALPHABETA, 
+              KName_TNMain,KName_TNRow, KName_TNColumn, KName_TNSingleWave ,
+              NULL,
+              NULL,
+              KBin_TNMain64,
+              KBin_TNMainSize64) ;
+
+            return &variant ; 
+        }
+        else
+        {
+            static const Variant variant = SGEMM_VARIANT_OBJ(T,N,16,16,16,6,6,64,__ALPHA,
+              KName_TNMainAlpha,KName_TNRowAlpha, KName_TNColumnAlpha, KName_TNSingleWaveAlpha ,
+              NULL,
+              NULL,
+              KBin_TNMainAlpha64,
+              KBin_TNMainAlphaSize64) ;
+
+            return &variant ; 
+        }
+      }
+     /* else
+      {
+        if (args.beta!=0)
+        {
+
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = SGEMM_VARIANT_OBJ(N,N,1,16,16,6,6,64,__ALPHABETA, 
+              KName_NNMainK1,KName_NNRowK1, KName_NNColumnK1, KName_NNSingleWaveK1 ,
+              NULL,
+              NULL,
+              KBin_NNMainK164,
+              KBin_NNMainK1Size64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+          }   
+        }
+        else
+        {
+          if(_64BitsUse==64)
+          {
+            static const Variant variant = SGEMM_VARIANT_OBJ(N,N,1,16,16,6,6,64,__ALPHA,
+              KName_NNMainK1Alpha,KName_NNRowK1Alpha, KName_NNColumnK1Alpha, KName_NNSingleWaveK1Alpha ,
+              NULL,
+              NULL,
+              KBin_NNMainK1Alpha64,
+              KBin_NNMainK1AlphaSize64) ;
+
+            return &variant ; 
+          }
+          else
+          {
+            std::cout<<"we don't support clblas on 32 bits"<< std::endl;
+            assert(1);
+          }   
+        }
+      }*/
+
+
+    }
+  }
+
+  return NULL;
+}
+
+clBlashawaiiSgemmSplitKernelFunctor::clBlashawaiiSgemmSplitKernelFunctor(Args & args, const Variant * variant, cl_int & err)
+{
+
+  cl_device_id device;
+  cl_context context;
+  m_program=NULL;
+  m_variantSplit = variant;
+
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+    return;
+  }
+
+  if (VERB) printf(" ===> GET KERNEL %s\n", this->m_variantSplit->variantName) ;
+
+  //Ben do I use the correct "kernel_name"?
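+  // BinaryLookup searches the per-context/per-device program cache, keyed by
+  // the variant name recorded via variantRaw() below; if no entry is found,
+  // the program is (re)built from the pre-compiled binary and fetched back
+  // with getProgram().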
+  BinaryLookup bl(context, device, "clBlashawaiiSgemmSplitKernelFunctor");
+
+  bl.variantRaw( this->m_variantSplit->variantName, strlen(this->m_variantSplit->variantName)+1 ) ;
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+    if ( this->m_variantSplit->bin != NULL ) 
+    {
+      // build from a pre-compiled version of the kernel (SPIR or cl binaries)
+      //only 1 binary containing all the kernels
+      err = bl.buildFromBinary(this->m_variantSplit->bin, this->m_variantSplit->bin_size, /*this->m_variantSplit->build_options[i]*/ "-cl-std=2.0");
+    }
+    else
+    {
+      //// directly build from a char* 
+      //for (int i=0; i<4; i++)
+      //  if(this->m_variantSplit->source[i] != 0)
+      //    err = bl.buildFromSource(this->m_variantSplit->source[i]);
+      if (VERB) printf(" ===> BUILD PROBLEM WE DON'T SUPPORT SOURCE BUILD FOR SPLIT SGEMM\n") ;
+      return;
+    } 
+
+    if ( err != CL_SUCCESS )
+    {  
+      if (VERB) printf(" ===> BUILD PROBLEM\n") ;
+
+      return;
+    }
+  }
+
+  this->m_program = bl.getProgram();
+}
+
+
+
+clBlashawaiiSgemmSplitKernelFunctor * 
+  clBlashawaiiSgemmSplitKernelFunctor::provide(clblasSgemmFunctor::Args & args, char* DevName) 
+{
+
+  if ( args.order == clblasRowMajor ) 
+    return NULL ;   // The RowMajor case shall never occur. 
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+  cl_uint bitness = getAddressBits(dev);
+
+  int major;
+  int minor;
+
+  getCLVersion(dev, major, minor);
+
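+  // The pre-built split kernels are loaded with "-cl-std=2.0" (see
+  // buildFromBinary in the constructor), so an OpenCL 2.x device is required.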
+  if (major<2)
+    return NULL;
+
+  // to_upper( DevName);
+  const Variant * variant = select_variant_SplitKernel( args, DevName, bitness ) ;
+  if ( variant == NULL )  
+    return NULL ; 
+
+
+
+  CacheSplit::Lookup lookup(cachesplit, ctxt, dev, variant) ;
+
+
+  if ( lookup.ok() )
+  {
+    clBlashawaiiSgemmSplitKernelFunctor * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+
+  clBlashawaiiSgemmSplitKernelFunctor * functor = new clBlashawaiiSgemmSplitKernelFunctor(args, variant, err);
+  if (err != CL_SUCCESS)
+  {
+    return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+
+}
+
+
+cl_int clBlashawaiiSgemmSplitKernelFunctor::KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[4], Args &args)
+{
+  size_t GlobalX =args.M/m_variantSplit->bwi[0];
+  GlobalX-=GlobalX%m_variantSplit->ls[0];
+  //
+
+  size_t GlobalY = args.N/m_variantSplit->bwi[1];
+  GlobalY-=GlobalY%m_variantSplit->ls[1];
+
+
+  std::size_t gs[2] = {GlobalX, GlobalY};
+  cl_int error = 0;
+
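+  // Dispatch scheme (as suggested by the branches below): Kernel[0] covers
+  // the region made of full 96x96 tiles, Kernel[1] the tail in M (M % 96),
+  // Kernel[2] the tail in N (N % 96) and Kernel[3] the corner where both
+  // tails overlap; only the kernels needed for the given M and N are enqueued.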
+  if (args.M%96==0 && args.N%96==0)
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 0 \n") ;
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,args.events);
+    return error;
+  }
+
+  if (args.M%96!=0 && args.N%96!=0 && args.M>=96 && args.N>=96 )
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 0, 1, 2, 3 \n") ;
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+
+    gs[0] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+
+    gs[1] = 16;
+    gs[0] = GlobalX;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, 0, NULL,NULL);
+
+    gs[0] = 16; gs[1] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+    return error;
+  }
+  if (args.M%96==0 && args.N%96!=0 &&  args.N>96 )
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 0, 2, \n") ;
+
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+    gs[1] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, 0, NULL, args.events);
+
+    return error;
+  }
+  if (args.N%96==0 && args.M%96!=0 &&  args.M>96 )
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 0, 1 \n") ;
+
+    error = clEnqueueNDRangeKernel(queue, Kernel[0], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList,NULL);
+    gs[0] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, 0, NULL, args.events);
+
+    return error;
+  }
+  if(args.M<96 && args.N%96==0)
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 1, \n") ;
+
+    gs[0] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, args.events);
+    return error;
+  }
+  if(args.M<96 && args.N%96!=0 && args.N>=96)
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL  1, 3 \n") ;
+
+    gs[0] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[1], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, NULL);
+    gs[1] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+    return error;
+  }
+  if(args.N<96 && args.M%96==0)
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL  2 \n") ;
+
+    gs[1] = 16;  
+    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, args.events);
+    return error;
+  }
+  if(args.N<96 && args.M%96!=0&& args.M>=96)
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL 2, 3 \n") ;
+
+    gs[1] = 16;  
+    error |= clEnqueueNDRangeKernel(queue, Kernel[2], 2, NULL, gs, m_variantSplit->ls, args.numEventsInWaitList, args.eventWaitList, NULL);
+
+    gs[0] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls, 0, NULL,args.events);
+    return error;
+  }
+  if (args.N<96 && args.M<96)
+  {
+    if (VERB) printf(" ===> EXECUTE KERNEL  3 \n") ;
+    gs[0] = 16; gs[1] = 16;
+    error |= clEnqueueNDRangeKernel(queue, Kernel[3], 2, NULL, gs, m_variantSplit->ls,args.numEventsInWaitList, args.eventWaitList, args.events);
+    return error;
+  }
+
+  return clblasNotImplemented;
+}
+
+
+
+clblasStatus clBlashawaiiSgemmSplitKernelFunctor::execute(Args &args)
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  if (VERB) printf(" ===> EXECUTE KERNEL %s, alpha =%f ,beta = %f\n", this->m_variantSplit->kernel_name, args.alpha, args.beta) ;
+
+  cl_kernel kernel[4]; 
+  int NBKernel = 0;
+
+  for (int i=0; i<4; i++)
+  {
+    if (this->m_variantSplit->kernel_name[i])
+    {
+      kernel[i] = clCreateKernel( this->m_program, this->m_variantSplit->kernel_name[i],  &err);
+      if (err != CL_SUCCESS)
+        return clblasStatus(err) ; 
+      NBKernel++;
+    }
+    else
+      break;
+  }
+
+  if (NBKernel != 4) return clblasStatus(clblasBuildProgramFailure) ; 
+
+  if (VERB)
+  {
+    for (int i=0; i<NBKernel; i++)
+      printf(" ===> FOUND %s\n", this->m_variantSplit->kernel_name[i]) ;
+  }
+
+  int M   = args.M, N = args.N, K = args.K;
+  int lda = args.lda, ldb = args.ldb, ldc = args.ldc;
+
+  int offsetA = args.offA;
+  int offsetB = args.offB;
+  int offsetC = args.offC;
+
+  int arg[4]={0, 0, 0, 0} ; 
+
+  //// All sgemm kernels shall have the same arguments: (A,B,C,M,N,K,alpha,beta,lda,ldb,ldc,offa,offb,offc) 
+
+  for (int i=0; i<NBKernel; i++)
+  {
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.A);
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.B);
+    setKernelArg<cl_mem>(kernel[i], arg[i]++, args.C);
+
+    setKernelArg<int>(kernel[i], arg[i]++, M);
+    setKernelArg<int>(kernel[i], arg[i]++, N);
+    setKernelArg<int>(kernel[i], arg[i]++, K);
+
+    setKernelArg<cl_float>(kernel[i], arg[i]++, args.alpha);
+    if (args.beta!=0 && this->m_variantSplit->mult.compare("__ALPHA")!=0)
+      setKernelArg<cl_float>(kernel[i], arg[i]++, args.beta);
+
+    setKernelArg<int>(kernel[i], arg[i]++, lda);
+    setKernelArg<int>(kernel[i], arg[i]++, ldb);
+    setKernelArg<int>(kernel[i], arg[i]++, ldc);
+
+    setKernelArg<int>(kernel[i], arg[i]++, offsetA);
+    setKernelArg<int>(kernel[i], arg[i]++, offsetB);
+    setKernelArg<int>(kernel[i], arg[i]++, offsetC);
+  }
+
+  err = KernelsLaunch(queue, kernel, args);
+
+
+
+  for (int i = 0; i<NBKernel; i++)
+    clReleaseKernel(kernel[i]) ;
+
+  if (VERB) printf(" ===> ERR=%d \n",(int)err) ;
+
+  // err= clFinish(queue);
+  return clblasStatus(err) ;
+
+}
+#endif
diff --git a/src/library/blas/functor/include/BinaryBuild.h b/src/library/blas/functor/include/BinaryBuild.h
new file mode 100644
index 0000000..5053c8f
--- /dev/null
+++ b/src/library/blas/functor/include/BinaryBuild.h
@@ -0,0 +1,10 @@
+#ifndef _BINARY_BUILD_
+#define _BINARY_BUILD_
+
+//#include "CL\opencl.h"
+// Controls whether we build from CL binaries or from CL source code
+//#define BUILD_KERNEL_FROM_STRING 1
+
+// Determines whether the 32-bit or 64-bit ISA is used
+//extern /*char * _64Bits;*/cl_uint _64Bits;
+#endif  //_BINARY_BUILD_
\ No newline at end of file
diff --git a/src/library/blas/functor/include/atomic_counter.h b/src/library/blas/functor/include/atomic_counter.h
new file mode 100644
index 0000000..15f4da6
--- /dev/null
+++ b/src/library/blas/functor/include/atomic_counter.h
@@ -0,0 +1,173 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_ATOMIC_COUNTER_H_
+#define _CLBLAS_ATOMIC_COUNTER_H_
+
+#include <mutex.h>
+
+//
+// This header provides the class clblasAtomicCounter that can be
+// used to implement a thread-safe usage counter as follows:
+//  
+// class MyObject 
+// {
+//    ...
+//    clblasAtomicCounter counter ; 
+//    ... 
+// 
+//    MyObject() : counter(1) {
+//      ...
+//    }
+//
+//    void retain() {
+//       counter.increment() ; 
+//    }
+// 
+//    void release() {
+//       if ( counter.decrement() == 0 ) {
+//         delete this ; 
+//       } 
+//    }
+//   
+// }
+//
+//
+// This header provides 2 versions controlled by the macro CLBLAS_USE_STD_ATOMIC:
+//
+//  - if CLBLAS_USE_STD_ATOMIC is set to non-zero then an implementation 
+//    based on <atomic> from C++11 is used 
+//
+//  - if CLBLAS_USE_STD_ATOMIC is set to zero then a portable but less efficient 
+//    version using mutex is used 
+//
+//  - else, if CLBLAS_USE_STD_ATOMIC is unset, then detection of <atomic>
+//    and C++11 support is attempted automatically
+//
+// Remark: there exist several other atomic implementations (e.g. boost, ...) that
+//         could be used here
+//
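+// For example, the portable mutex-based version can be forced at build time
+// by defining the macro explicitly (illustrative compiler flag):
+//
+//    -DCLBLAS_USE_STD_ATOMIC=0
+//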
+
+#ifndef CLBLAS_USE_STD_ATOMIC
+
+//
+// FIXME: G++ does not properly declare __cplusplus  according to the standard
+//        but may provide <atomic> at least in recent versions
+//
+
+#if __cplusplus >= 201103L 
+#define CLBLAS_USE_STD_ATOMIC 1
+#else
+#define CLBLAS_USE_STD_ATOMIC 0 
+#endif
+
+#endif
+
+
+#if CLBLAS_USE_STD_ATOMIC
+
+// This is the optimized version using std::atomic from C++11 
+//
+// In the long term this shall be the only version
+//
+
+#include <atomic> 
+
+class clblasAtomicCounter 
+{
+private:
+    std::atomic<int> value;
+public:
+
+    clblasAtomicCounter(int v) : value(v) { }  
+
+    // Increments the counter and returns the incremented value. 
+    // (so a pre-increment) 
+    int increment() {
+      return ++ value;
+    }
+
+    // Decrements the counter and returns the decremented value. 
+    // (so a pre-decrement)
+    int decrement() {
+      return -- value;
+    }
+
+    // Provide the counter value 
+    int get(){
+        return value.load();
+    }
+};
+
+#else
+
+//
+// A less optimized but more portable version using 
+// a mutex to ensure atomicity
+//
+
+class clblasAtomicCounter 
+{
+private:
+  
+  int       value;
+  mutex_t * mutex ; 
+  
+public:
+
+  clblasAtomicCounter(int v) : value(v)
+  { 
+    mutex = mutexInit() ; 
+  }  
+
+  ~clblasAtomicCounter() 
+  { 
+    mutexDestroy(mutex) ;
+  }  
+  
+  int increment()
+  {
+    int v ;
+    mutexLock( this->mutex ) ; 
+    v = ++ this->value ;
+    mutexUnlock( this->mutex ) ; 
+    return v ; 
+  }
+  
+  int decrement()
+  {
+    int v ;
+    mutexLock( this->mutex ) ; 
+    v = -- this->value;
+    mutexUnlock( this->mutex ) ; 
+    return v ; 
+  }
+
+  int get(){
+    int v ;
+    mutexLock( this->mutex ) ; 
+    v = this->value ;
+    mutexUnlock( this->mutex ) ; 
+    return v ; 
+  }
+
+
+};
+
+#endif
+
+
+#endif
diff --git a/src/library/blas/functor/include/bonaire.h b/src/library/blas/functor/include/bonaire.h
new file mode 100644
index 0000000..7b92594
--- /dev/null
+++ b/src/library/blas/functor/include/bonaire.h
@@ -0,0 +1,41 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#ifndef _CLBLAS_FUNCTION_SELECTOR_BONAIRE_
+#define _CLBLAS_FUNCTION_SELECTOR_BONAIRE_
+
+#include <functor_selector.h>
+//#include <functor_hawaii_dgemm_NT_MN48.h>
+
+class FunctorSelectorBonaire : public clblasFunctorSelector
+{
+private:
+    FunctorSelectorBonaire();
+
+    static FunctorSelectorBonaire instance;
+
+public:
+
+    // We don't want to provide any DP algorithm as DP is slow on Bonaire
+    //virtual clblasDgemmFunctor * select_dgemm_specific(clblasDgemmFunctor::Args & args);
+	  virtual clblasSgemmFunctor * select_sgemm_specific(clblasSgemmFunctor::Args & args);
+  //  virtual clblasDtrsmFunctor * select_dtrsm_specific(clblasDtrsmFunctor::Args & args);
+
+};
+
+
+#endif // _CLBLAS_FUNCTION_SELECTOR_BONAIRE_
diff --git a/src/library/blas/functor/include/functor.h b/src/library/blas/functor/include/functor.h
new file mode 100644
index 0000000..4dc3667
--- /dev/null
+++ b/src/library/blas/functor/include/functor.h
@@ -0,0 +1,496 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_H_
+#define _CLBLAS_FUNCTOR_H_
+
+#include <clBLAS.h>
+#include <cassert>
+#include <map>
+#include <rwlock.h>
+#include "atomic_counter.h"
+#include "functor_utils.h"
+
+//
+// clblasFunctor is the base class for all functors used to implement clBLAS API calls
+//
+// A functor is basically an object that provides an implementation of a given function
+// as a virtual member. 
+//
+// The clblasFunctor class itself does not provide such function but all its
+// derived classes shall provide one. For consistency, it is recommended to
+// use 'execute' as the name for the member providing that function.
+//
+// So the class clblasFunctor shall be derived once for each supported
+// function. For instance, the class clblasDgemmFunctor provides the base
+// functor class for implementing the DGEMM call. 
+//
+// The class clblasDgemmFunctor is itself derived one or more times to provide
+// the real implementations of the DGEMM functionality.
+//
+// The choice of the proper implementation amongst the available functors is 
+// typically delegated to another mechanism (see clblasFunctorSelector for instance). 
+//
+class clblasFunctor 
+{
+public:
+
+    clblasFunctor();
+    virtual ~clblasFunctor() ;
+
+    static cl_int getDeviceAndContext(cl_command_queue q,
+                                      cl_device_id & device,
+                                      cl_context & context);
+    static cl_uint getAddressBits(cl_device_id & device);
+
+	static void getCLVersion(cl_device_id & device, int&major, int& minor);
+
+
+    template <class T>
+    static void setKernelArg(cl_kernel kernel,
+                             int index,
+                             T data)
+    {
+        cl_int err = clSetKernelArg(kernel,
+                                    (unsigned int)index,
+                                    sizeof(T),
+                                    (const void *)&data);
+
+        assert( err == CL_SUCCESS );
+    }
+   
+    static void setKernelArgPtr(cl_kernel kernel,
+                                int index,
+                                size_t sz,
+                                const void *data)
+    {
+        cl_int err = clSetKernelArg(kernel,
+                                    (unsigned int)index,
+                                    sz,
+                                    data);
+
+        assert( err == CL_SUCCESS );
+    }
+
+    // Indicate that this object has one more user.
+    // The default behavior is to increase the internal use counter.
+    // This function is thread-safe.
+    virtual void retain();
+
+    
+    // Indicate that this object has one less user.
+    // The default behavior is to decrease the internal use counter
+    // and, if it reaches zero, to destroy this object.
+    // This function is thread-safe.
+    virtual void release();
+
+private:
+  
+    // Thread-safe reference counter used by the default implementation 
+    // of retain and release
+    clblasAtomicCounter refcount; 
+
+  //protected:
+  //   cl_program program;
+  //   cl_kernel kernel;
+};
+
+
+
+//
+// This class shall be the base class for all Functor caches.  
+// 
+// The idea is that all Functor caches derived from that class 
+// will register themselves in a global cache pool thus allowing 
+// some global cache management tasks to be performed
+//
+// As of now, the only implemented task is to discard all 
+// cache entries (see cleanFunctorCaches() typically called 
+// during clblasTeardown()) but, in future versions, it 
+// would be nice to have other management tasks such as 
+// removing all cache entries using a specific context 
+// or controlling the overall size of the cache
+//
+class clblasFunctorCacheBase {
+public:
+  // Constructor: registers the cache in the global cache pool
+  clblasFunctorCacheBase(); 
+  // Destructor: unregisters the cache from the global cache pool
+  virtual ~clblasFunctorCacheBase() ;
+public:
+  // Discard all members of the cache 
+  virtual void discardAll() = 0 ;
+
+} ;
+
+
+//
+// A dummy class used to represent the absence of additional data. 
+//
+// 
+class clblasNoData {
+public:
+  // Provide the operator '<' needed by std::less 
+  inline bool operator<(const clblasNoData &) const { return false ; }
+} ;
+
+//
+// Represents the only possible value for clblasNoData 
+//
+#define CLBLAS_NO_DATA clblasNoData() 
+
+//
+// The templated class clblasFunctorCache<F,D> provides a reusable 
+// implementation of a cache of functors of type F according to the 
+// openCL context and the openCL device.  
+//
+// The type D is optional and represents additional data used to 
+// index the cache entries. The third optional template 
+// argument CompareD is a comparison object for the type D similar 
+// to those used in std::map or std::sort. 
+//
+// The idea is that each physical implementation of a functor is
+// supposed to manage its own cache in order to become reusable
+// between subsequent calls to clBLAS.
+//
+// The cache implementation is thread-safe assuming that it is used 
+// properly:
+//   (1) The lookup() method shall be called to search an existing 
+//       entry corresponding to the specified context and device. 
+//   (2) If the lookup() is successful then the reference counter
+//       on the returned functor is implicitly increased and that 
+//       functor can be used immediately. 
+//   (3) If the lookup() is not successful - that is if the
+//       resulting functor is null, then the cache is locked for 
+//       writing and a new cache entry is prepared. The user 
+//       is then responsible for creating a new functor that must 
+//       be registered via a call to fillPendingEntry().
+//       Alternatively, the new cache entry can be dropped by a
+//       call to dropPendingEntry().
+//       Not calling fillPendingEntry() or dropPendingEntry() will leave
+//       the cache in a locked state thus making it unusable and 
+//       likely to cause a dead-lock. 
+//
+// In order to simplify development and to avoid errors, the
+// clblasFunctorCache provides a Lookup object class that hides most
+// of those details and ensures that the cache does not stay in a
+// locked state (as long as the Lookup object is properly destroyed).
+//
+// In case a non-trivial custom type D is specified it may be necessary 
+// to provide the comparison operator '<' needed by less<D> as in the 
+// following example:
+//
+//    struct MyData 
+//    { 
+//
+//      int x,y,z ; 
+//       
+//      inline bool operator< ( const MyData &b ) const 
+//      {
+//        const MyData & a = *this ; 
+//        if ( a.x < b.x ) return true ; else if ( a.x > b.x ) return false ;
+//        if ( a.y < b.y ) return true ; else if ( a.y > b.y ) return false ;
+//        if ( a.z < b.z ) return true ; else if ( a.z > b.z ) return false ;
+//        return false ; 
+//      }
+//
+//    }
+//
+// Alternatively, if the custom type D is known to be a fully
+// initialized POD (including the unused bits) then the operator 
+// '<' can be provided using memcmp()
+//
+//    #include <cstring>
+//
+//    struct MyData 
+//    { 
+//      MyData() { std::memset(this, 0, sizeof(MyData)) ; }
+// 
+//      int x,y,z ; 
+//       
+//      inline bool operator< ( const MyData &b ) const 
+//      {
+//        return std::memcmp( this, &b, sizeof(MyData) ) < 0 ;
+//      }
+//    }
+//   
+//
+//
+//
+template<class F, typename D = clblasNoData , typename CompareD = std::less<D> >
+class clblasFunctorCache  : public clblasFunctorCacheBase 
+{
+
+private:
+
+  struct Key { 
+    cl_device_id dev;
+    cl_context   ctxt;
+    D            data ;  // Additional user data in the key 
+
+
+    //compare two Keys
+    bool operator< (const Key & b) const {
+      const Key &a = *this;
+      if(a.dev != b.dev)
+        return a.dev < b.dev;        
+      
+      if(a.ctxt != b.ctxt)
+        return a.ctxt < b.ctxt;  
+
+      CompareD cmp ; 
+      return cmp(a.data,b.data) ;
+    }
+  };
+
+
+  typedef clblasFunctorCache<F,D,CompareD> Cache;
+
+  // The current implementation is using a std::map internally. 
+  // That may not be the most efficient but that can easily be 
+  // changed if needed.
+
+  typedef std::map<Key, F *>        Map;
+  typedef typename Map::iterator    Entry;
+
+
+private:
+
+  Map        m_map;
+  rwlock_t * m_rwlock;
+
+public:
+
+  //Cache constructor: init the rwlock
+  clblasFunctorCache()
+  {
+    this->m_rwlock = rwlockInit();
+  }
+
+  
+  //Cache destructor: destroy the rwlock
+  ~clblasFunctorCache(){
+    rwlockDestroy(this->m_rwlock);
+  }
+
+
+public:
+
+  // 
+  // Lookup objects are short-lived objects used to perform a single query in
+  // the cache. 
+  //
+  // The usage pattern of a Lookup object shall always be the same
+  //
+  //  - Declare a local Lookup object 
+  //  - Perform a call to the ok() member 
+  //     (1) if true then use the functor returned by get() 
+  //     (2) if false then the cache is locked until the user provides 
+  //         a new functor with a call to set().
+  //  - Destroy the Lookup object
+  //
+  // So a functor implementation can implement its own cache as illustrated 
+  // by the following example:
+  // 
+  //    class MyDGemmFunctor: public clblasDGemmFunctor
+  //    {
+  //
+  //      ... 
+  //
+  //      typedef clblasFunctorCache<MyDGemmFunctor> Cache ;  
+  //      static Cache cache;
+  //
+  //      ...
+  //    
+  //      MyDGemmFunctor * provide(...)
+  //      {
+  //        MyDGemmFunctor * functor ; 
+  //
+  //        MyDGemmFunctor::Cache::Lookup lookup(MyDGemmFunctor::cache, ctxt, dev) ;
+  //        
+  //        if ( lookup.ok() ) 
+  //        {
+  //          return lookup.get() ; 
+  //        } 
+  //        else 
+  //        {
+  //          MyDGemmFunctor * functor = new MyDGemmFunctor(...);
+  //          lookup.set(functor) ;
+  //          return functor ;
+  //        }
+  //        
+  //      }
+  //
+  //      ... 
+  // 
+  //    } ;
+  //
+  //
+  class Lookup {
+  private:
+    Entry         m_entry ;
+    F *           m_functor ;
+    Cache &       m_cache ;
+
+  public:
+
+    // Constructor
+    //
+    // Perform a lookup in the specified cache 
+    // 
+    Lookup(Cache & cache, cl_context ctxt,  cl_device_id dev , const D & data) : m_cache(cache) 
+    {
+      this->m_functor = m_cache.lookup(ctxt,dev,data,this->m_entry) ;
+    }
+
+    //
+    // Alternative constructor when D is the default type clblasNoData
+    // 
+    Lookup(Cache & cache, cl_context ctxt,  cl_device_id dev ) : m_cache(cache) 
+    {
+      this->m_functor = m_cache.lookup(ctxt,dev,CLBLAS_NO_DATA,this->m_entry) ;
+    }
+   
+
+    // Destructor 
+    ~Lookup()
+    {
+      if (  !this->ok() ) {
+        // Hoops! Something went wrong! 
+        // It is important to drop the pending cache entry
+        m_cache.dropPendingEntry(this->m_entry) ;
+      }
+    } 
+    
+     
+    bool ok() {
+      return this->m_functor != NULL ;
+    }
+    
+    
+    F * get() {
+      assert(this->ok()) ;
+      //return m_cache.getFunctorFromEntry(this->entry) ;
+      return this->m_functor;
+    }
+    
+    // Set the functor in the pending cache entry
+    void set(F* f) 
+    {
+      assert(!this->ok()) ;
+      assert(f != NULL) ;
+      m_cache.fillPendingEntry(this->m_entry,f) ;
+      this->m_functor = f ;
+      this->m_functor->retain();
+    }
+    
+  } ;
+
+ 
+private:
+  
+  // Perform a lookup in the cache.
+  //
+  // In case of success, returns the found functor.
+  //
+  // In case of failure, locks the cache, creates a new pending cache entry (in argument 'entry')
+  // and returns NULL.  The pending 'entry' shall then be populated with a valid functor by a 
+  // call to fillPendingEntry() or shall be dropped by a call to dropPendingEntry(). Any failure
+  // to perform one of those actions will leave the cache in a locked state, thus making it unusable.
+  // 
+  // Remark: Direct use of this member is discouraged. Use the Lookup class instead. 
+  // 
+  F* lookup(cl_context ctxt, cl_device_id dev, const D & data, Entry & entry)
+  {
+
+    Key key = { dev, ctxt , data };
+
+    rwlockReadLock(this->m_rwlock);
+    {
+
+      Entry l_entry = this->m_map.find(key);
+      if( l_entry != this->m_map.end() )
+      {
+        entry = l_entry;
+        F * f =  entry->second;
+        rwlockReadUnlock(this->m_rwlock);
+ 
+        return f ; 
+      }
+    }
+    rwlockReadUnlock(this->m_rwlock);
+
+    // key was not found! It must be created
+    std::pair<Entry,bool> ret;
+    
+    rwlockWriteLock(this->m_rwlock);
+
+    ret = this->m_map.insert ( std::make_pair(key,(F *) NULL) );
+
+    if (!ret.second)  {
+      // The key already exists! 
+      F * f  = ret.first->second ;
+      rwlockWriteUnlock(this->m_rwlock);
+      return f ; 
+    }
+
+    entry = ret.first;
+
+    // Reminder: the lookup() returns with the cache in a write-locked state
+    return NULL;    
+
+  };
+
+
+  // Fill a pending cache entry, as created by an unsuccessful
+  // call to lookup(), with a valid functor.
+  void fillPendingEntry(Entry & entry, F * functor)
+  {
+    assert(functor != NULL) ;
+    entry->second = functor ;
+    rwlockWriteUnlock(this->m_rwlock);
+  }
+
+  // Drop a pending cache entry, as created by an
+  // unsuccessful call to lookup().
+  void dropPendingEntry(Entry & entry ) 
+  {
+    this->m_map.erase(entry) ;
+    rwlockWriteUnlock(this->m_rwlock);
+  }
+
+public: // Inherited members from clblasFunctorCacheBase
+
+  void discardAll() 
+  {
+     rwlockWriteLock(this->m_rwlock);
+     
+    while ( true ) 
+    {
+      Entry entry = this->m_map.begin() ;
+      if ( entry == this->m_map.end() ) 
+        break ; 
+      entry->second->release() ;
+      this->m_map.erase(entry) ;
+    }
+    rwlockWriteUnlock(this->m_rwlock);   
+  }
+
+
+};
+
+
+#endif // _CLBLAS_FUNCTOR_H_
diff --git a/src/library/blas/functor/include/functor_fill.h b/src/library/blas/functor/include/functor_fill.h
new file mode 100644
index 0000000..8ba84ab
--- /dev/null
+++ b/src/library/blas/functor/include/functor_fill.h
@@ -0,0 +1,99 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_FILL_H_
+#define _CLBLAS_FUNCTOR_FILL_H_
+
+#include "functor.h"
+
+//
+// The clblasFill2DFunctor provides a method to fill a rectangular part of a
+// 2D matrix with a single element value.
+//
+// The element size shall be 1, 2, 4, 8 or 16. Other values are not (yet) supported
+//
+// The functor operates in ColumnMajor mode 
+//
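+// Illustrative use (a sketch only; the functor instance, matrix arguments and
+// fill value are hypothetical):
+//
+//    float zero = 0.0f;
+//    clblasFill2DFunctor::Args args(A, offA, m, n, ldA,
+//                                   sizeof(float), &zero,
+//                                   queue, 0, NULL, NULL);
+//    fill2d->execute(args);   // fill2d obtained from a functor selector
+//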
+class clblasFill2DFunctor : public clblasFunctor 
+{
+public:
+
+  struct Args
+  {
+    cl_mem           A;
+    size_t           offA;         // offset in A (in elements) 
+    size_t           m ;           // number of rows
+    size_t           n ;           // number of columns
+    size_t           ldA ;         // distance between two columns (in elements)
+    int              elemsize ; 
+    const void *     value ;  
+    cl_command_queue queue;    
+    cl_uint          numEventsInWaitList;
+    const cl_event * eventWaitList;
+    cl_event *       events;
+
+    Args(cl_mem  A,
+         size_t  offA,   
+         size_t  m,
+         size_t  n,
+         size_t  ldA,          // distance between two columns (in elements)
+         int     elemsize, 
+         const void *  value,       // The fill value (elemsize bytes)
+         cl_command_queue queue,
+         cl_uint          numEventsInWaitList,
+         const cl_event * eventWaitList,
+         cl_event *       events) 
+      : A(A),
+        offA(offA),
+        m(m),
+        n(n),
+        ldA(ldA),
+        elemsize(elemsize),
+        value(value),
+        queue(queue),
+        numEventsInWaitList(numEventsInWaitList),
+        eventWaitList(eventWaitList),
+        events(events)        
+    {
+    }
+  } ;
+
+
+  virtual clblasStatus execute(Args & args) = 0;
+ 
+} ;
+
+//
+// A default portable implementation of clblasFill2DFunctor
+//
+class clblasFill2DFunctorDefault : public clblasFill2DFunctor
+{
+private:
+  clblasFill2DFunctorDefault(cl_context ctxt, 
+                             cl_device_id dev,
+                             int elemsize, 
+                             cl_int & err) ;
+  ~clblasFill2DFunctorDefault() ;
+public:  
+  static clblasFill2DFunctorDefault * provide(Args & args) ;  
+public:  
+  clblasStatus execute(Args & args) ;
+private:  
+  int        m_elemsize ;  // the element size. Will also be used as key in the cache
+  cl_program m_program ;   
+} ;
+
+#endif
diff --git a/src/library/blas/functor/include/functor_hawaii_dgemm_NT_MN48.h b/src/library/blas/functor/include/functor_hawaii_dgemm_NT_MN48.h
new file mode 100644
index 0000000..0d7d648
--- /dev/null
+++ b/src/library/blas/functor/include/functor_hawaii_dgemm_NT_MN48.h
@@ -0,0 +1,210 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <functor.h>
+#include <binary_lookup.h>
+#include <iostream>
+
+#define BUILD_KERNEL_FROM_STRING 0
+
+#if BUILD_KERNEL_FROM_STRING
+#include <dgemm_NT_MN48.clT>
+#else
+#include <dgemm_NT_MN48.spir.clT>
+#endif
+
+
+
+class clblasDgemmFunctorHawaii_NT_MN48 : public clblasDgemmFunctor 
+{
+private:  // Constructor & Destructor
+
+  clblasDgemmFunctorHawaii_NT_MN48(Args & args, cl_int & err);
+  ~clblasDgemmFunctorHawaii_NT_MN48();
+  
+public: // Members inherited from clblasDgemmFunctor 
+  virtual clblasStatus execute(Args & a);
+
+public: 
+  static clblasDgemmFunctorHawaii_NT_MN48 * provide(Args & args);
+
+private:
+
+  typedef clblasFunctorCache<clblasDgemmFunctorHawaii_NT_MN48,bool> Cache ;
+  static  Cache cache;
+
+private:  
+  cl_program program ;  
+};
+
+
+
+clblasDgemmFunctorHawaii_NT_MN48::Cache clblasDgemmFunctorHawaii_NT_MN48::cache;
+
+clblasDgemmFunctorHawaii_NT_MN48 * 
+clblasDgemmFunctorHawaii_NT_MN48::provide(clblasDgemmFunctor::Args & args)
+{
+  //Works only if TRANSA == N, TRANSB == T, and M and N are multiples of 48
+  //Note: Are K%48 == 0  LDA LDB %2 == 0 and OFFA OFFB %2 == 0 required?
+  bool applicable =  (args.transA == clblasNoTrans) 
+                  && (args.transB == clblasTrans) 
+                  && (args.M % 48 == 0) 
+                  && (args.N % 48 == 0)
+                  && (args.K % 48 == 0) 
+                  && (args.order == clblasColumnMajor) ;
+  if(!applicable)
+  {
+    return NULL;
+  }
+
+  cl_device_id dev;
+  cl_context   ctxt;
+
+  cl_int err = getDeviceAndContext(args.queue, dev, ctxt);
+
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+
+  Cache::Lookup lookup(cache, ctxt, dev, true ) ;
+
+  if ( lookup.ok() ){
+    clblasDgemmFunctorHawaii_NT_MN48 * functor = lookup.get();
+    functor->retain(); // increment the reference counter to avoid deletion while it is still being used
+    return functor;
+  }
+ 
+  clblasDgemmFunctorHawaii_NT_MN48 * functor = new clblasDgemmFunctorHawaii_NT_MN48(args, err);
+  if (err != CL_SUCCESS)
+  {
+      return NULL;
+  }
+
+  lookup.set(functor) ;
+
+  return functor;
+}
+
+clblasDgemmFunctorHawaii_NT_MN48::clblasDgemmFunctorHawaii_NT_MN48(Args & args, cl_int & err) :
+  program(0) 
+{
+  //Hawaii kernel here is only for testing.
+  //Works only if TRANSA == N, TRANSB == T, and M and N are multiples of 48
+  //Note: Are K%48 == 0  LDA LDB %2 == 0 and OFFA OFFB %2 == 0 required?
+
+  cl_device_id device;
+  cl_context context;
+
+  cl_command_queue queue = args.queue;
+  err = getDeviceAndContext(queue, device, context);
+  if( err != CL_SUCCESS )
+  {
+      return;
+  }
+
+  BinaryLookup bl(context, device, "clblasDgemmFunctorHawaii_NT_MN48");
+
+  bl.variantInt(48);
+
+  if ( !bl.found() ) // may create empty file or may wait until file is ready  
+  {
+#if BUILD_KERNEL_FROM_STRING
+    // directly build from a char* 
+    err = bl.buildFromSource(DGEMM_NT_MN48_KERNEL);
+#else
+    // build from compiled version of the kernel (SPIR)
+    err = bl.buildFromBinary(DGEMM_NT_MN48_SPIR_KERNEL, sizeof(DGEMM_NT_MN48_SPIR_KERNEL));
+#endif
+    if( err != CL_SUCCESS )
+    {
+      return;
+    }
+  }
+
+  this->program = bl.getProgram();
+}
+
+clblasDgemmFunctorHawaii_NT_MN48::~clblasDgemmFunctorHawaii_NT_MN48()
+{
+  if (this->program) {
+    clReleaseProgram( this->program ) ; 
+  } 
+}
+
+clblasStatus clblasDgemmFunctorHawaii_NT_MN48::execute(Args & args)
+{
+  cl_int err;
+  cl_command_queue queue = args.queue;
+
+  cl_kernel kernel = clCreateKernel( this->program, "dgemm",  &err);
+  if (err != CL_SUCCESS) return clblasStatus(err) ; 
+
+  int M = args.M, N = args.N, K = args.K;
+  int lda = args.lda, ldb = args.ldb, ldc = args.ldc;
+
+  int offsetA = args.offA;
+  int offsetB = args.offB;
+  int offsetC = args.offC;
+
+  setKernelArg<cl_mem>(kernel, 0, args.C);
+  setKernelArg<cl_mem>(kernel, 1, args.B);
+  setKernelArg<cl_mem>(kernel, 2, args.A);
+
+  setKernelArg<int>(kernel, 3, N);
+  setKernelArg<int>(kernel, 4, M);
+  setKernelArg<int>(kernel, 5, K);
+
+  setKernelArg<cl_double>(kernel, 6, args.alpha);
+  setKernelArg<cl_double>(kernel, 7, args.beta);
+
+  setKernelArg<int>(kernel,  8, ldc);
+  setKernelArg<int>(kernel,  9, ldb);
+  setKernelArg<int>(kernel, 10, lda);
+
+  setKernelArg<int>(kernel, 11, offsetC);
+  setKernelArg<int>(kernel, 12, offsetB);
+  setKernelArg<int>(kernel, 13, offsetA);
+
+  const size_t ls[2]  = {8, 8};
+  const size_t bwi[2] = {6, 6};
+
+  size_t globalThreads[2];
+
+  unsigned int thx, thy;
+
+  thx   = M/bwi[0] + ((M%bwi[0] != 0) ? 1 : 0);   // Each PE updates (bwi[0] x bwi[1])=(6 x 6) values
+  thx   = thx/ls[0] + ((thx%ls[0] != 0) ? 1 : 0); // Each work group is made of (ls[0] x ls[1])=(8 x 8) PE
+  thx   = ls[0] * thx;
+
+  thy   = N/bwi[1] + ((N%bwi[1] != 0) ? 1 : 0);   // Each PE updates (bwi[0] x bwi[1])=(6 x 6) values
+  thy   = thy/ls[1] + ((thy%ls[1] != 0) ? 1 : 0); // Each work group is made of (ls[0] x ls[1])=(8 x 8) PE
+  thy   = ls[1] * thy;
+
+  globalThreads[0] = thx;
+  globalThreads[1] = thy;
+
+  err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL,
+                               globalThreads, NULL , 
+                               args.numEventsInWaitList, 
+                               args.eventWaitList, 
+                               args.events);
+
+  clReleaseKernel(kernel) ;
+
+
+  return clblasStatus(err) ;
+}
diff --git a/src/library/blas/functor/include/functor_selector.h b/src/library/blas/functor/include/functor_selector.h
new file mode 100644
index 0000000..b0d03d0
--- /dev/null
+++ b/src/library/blas/functor/include/functor_selector.h
@@ -0,0 +1,149 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_SELECTOR_H_
+#define _CLBLAS_FUNCTOR_SELECTOR_H_
+
+#if defined(__APPLE__) || defined(__MACOSX)
+#include <OpenCL/opencl.h>
+#else
+#include <CL/opencl.h>
+#endif
+
+#include <stdio.h>
+#include <map>
+#include <devinfo.h>
+#include <functor.h>
+#include <functor_xgemm.h>
+#include <functor_xscal.h>
+#include <functor_xtrsm.h>
+#include <functor_fill.h>
+
+//
+// The purpose of clblasFunctorSelector is to provide some selection functions to  
+// choose amongst all functors available for a given task.
+//
+// Each BLAS function xxxx is typically associated to at least two virtual methods: 
+//
+//   - select_xxxx_generic() to select a generic functor applicable to all 
+//     possible arguments.
+//
+//   - select_xxxx_specific(...) to select the most optimized functor for the 
+//     specified arguments.
+//
+// A user willing to obtain a functor using one of those functions shall typically 
+// query the most appropriate selector using one of the static find() functions. 
+//
+// There is only one instance of the default clblasFunctorSelector that is used for 
+// devices without a specialized version. 
+//
+// So the clblasFunctorSelector is supposed to be derived once for each supported 
+// device architecture (e.g. FunctorSelectorTahiti for the AMD Tahiti GPU). Each 
+// of those derived classes shall define a single global instance of itself that
+// will register itself in a global table of all known functor selectors. 
+//
+// The specialized selector class shall then provide its own select virtual 
+// methods for which a specialized implementation exists. Those specialized
+// selection methods may fall back on the default method if they do not provide 
+// an optimized functor in all cases 
+//
+class clblasFunctorSelector 
+{
+protected:
+
+    // Constructor for the non-default instances specialized for a given device.     
+    clblasFunctorSelector(DeviceChip chip);
+
+private:
+
+    // This constructor is only for the default_instance
+    clblasFunctorSelector();    
+
+    // The default selector used when no specialized version exists
+    // for the current device 
+    static clblasFunctorSelector default_instance ;
+
+public:
+  
+    // Find the suitable functor selector for the specified queue  
+    static clblasFunctorSelector * find(cl_command_queue queue);
+
+    // Find the suitable functor selector for the specified device  
+    static clblasFunctorSelector * find(cl_device_id device);
+
+    // Find the suitable functor selector for the specified device architecture  
+    static clblasFunctorSelector * find(DeviceChip arch) ;
+
+    // Find out whether the device is a FirePro one. If not, we will return the default functor, which does not use the fast GCN kernels
+    static int FindFirePro(cl_device_id device);
+
+public:
+
+    // Provide an XGEMM functor usable in all cases
+
+    virtual clblasSgemmFunctor * select_sgemm_generic();
+    virtual clblasDgemmFunctor * select_dgemm_generic();
+    virtual clblasCgemmFunctor * select_cgemm_generic();
+    virtual clblasZgemmFunctor * select_zgemm_generic();
+
+    // Provide XGEMM functors optimized for specific arguments
+
+    virtual clblasSgemmFunctor * select_sgemm_specific(clblasSgemmFunctor::Args & args);
+    virtual clblasDgemmFunctor * select_dgemm_specific(clblasDgemmFunctor::Args & args);
+    virtual clblasCgemmFunctor * select_cgemm_specific(clblasCgemmFunctor::Args & args);
+    virtual clblasZgemmFunctor * select_zgemm_specific(clblasZgemmFunctor::Args & args);
+
+
+    // Provide an XSCAL functor usable in all cases
+
+    virtual clblasSscalFunctor  * select_sscal_generic(clblasSscalFunctor::Args & args);
+    virtual clblasDscalFunctor  * select_dscal_generic(clblasDscalFunctor::Args & args);
+    virtual clblasCscalFunctor  * select_cscal_generic(clblasCscalFunctor::Args & args);
+    virtual clblasZscalFunctor  * select_zscal_generic(clblasZscalFunctor::Args & args);
+    virtual clblasCsscalFunctor * select_csscal_generic(clblasCsscalFunctor::Args & args);
+    virtual clblasZdscalFunctor * select_zdscal_generic(clblasZdscalFunctor::Args & args);
+
+    // Provide XSCAL functors optimized for specific arguments
+
+    virtual clblasSscalFunctor  * select_sscal_specific(clblasSscalFunctor::Args & args);
+    virtual clblasDscalFunctor  * select_dscal_specific(clblasDscalFunctor::Args & args);
+    virtual clblasCscalFunctor  * select_cscal_specific(clblasCscalFunctor::Args & args);
+    virtual clblasZscalFunctor  * select_zscal_specific(clblasZscalFunctor::Args & args);
+    virtual clblasCsscalFunctor * select_csscal_specific(clblasCsscalFunctor::Args & args);
+    virtual clblasZdscalFunctor * select_zdscal_specific(clblasZdscalFunctor::Args & args);
+
+    // Provide an XTRSM functor usable in all cases
+
+    virtual clblasStrsmFunctor * select_strsm_generic();
+    virtual clblasDtrsmFunctor * select_dtrsm_generic();
+    virtual clblasCtrsmFunctor * select_ctrsm_generic();
+    virtual clblasZtrsmFunctor * select_ztrsm_generic();
+
+    // Provide XTRSM functors optimized for specific arguments
+
+    virtual clblasStrsmFunctor * select_strsm_specific(clblasStrsmFunctor::Args & args);
+    virtual clblasDtrsmFunctor * select_dtrsm_specific(clblasDtrsmFunctor::Args & args);
+    virtual clblasCtrsmFunctor * select_ctrsm_specific(clblasCtrsmFunctor::Args & args);
+    virtual clblasZtrsmFunctor * select_ztrsm_specific(clblasZtrsmFunctor::Args & args);
+
+    // Provide functor to perform non-contiguous fill in a 2D matrix
+    virtual clblasFill2DFunctor * select_fill2d_specific(clblasFill2DFunctor::Args & args);
+
+};
+
+
+
+#endif // _CLBLAS_FUNCTOR_SELECTOR_H_
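For illustration, a minimal sketch of how library code might obtain and run a DGEMM
functor through this selector; the call site and variable names are placeholders and
not part of the patch:

    // Sketch: dispatch a DGEMM call through the functor selector.
    clblasStatus runDgemm(clblasDgemmFunctor::Args & args)
    {
        // Pick the selector matching the device behind the queue.
        clblasFunctorSelector * selector = clblasFunctorSelector::find(args.queue);

        // Ask for the most specialized functor for these arguments; the selector
        // falls back on a generic/fallback functor when nothing better applies.
        clblasDgemmFunctor * functor = selector->select_dgemm_specific(args);

        clblasStatus status = functor->execute(args);
        functor->release();   // functors are reference counted
        return status;
    }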
diff --git a/src/library/blas/functor/include/functor_utils.h b/src/library/blas/functor/include/functor_utils.h
new file mode 100644
index 0000000..c685f05
--- /dev/null
+++ b/src/library/blas/functor/include/functor_utils.h
@@ -0,0 +1,116 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_UTILS_
+#define _CLBLAS_FUNCTOR_UTILS_
+
+#include <clblas-internal.h>
+
+static inline clblasStatus checkQueues(cl_uint numCommandQueues,
+                                       cl_command_queue *commandQueues)
+{
+    if (numCommandQueues == 0)
+    {
+        return clblasInvalidCommandQueue;
+    }
+                                                                        
+    if (commandQueues == 0)
+    {
+        return clblasInvalidCommandQueue;
+    }
+
+    for (cl_uint i=0 ; i<numCommandQueues ; ++i)
+    {
+        if (commandQueues[i] == 0)
+        {
+            return clblasInvalidCommandQueue;
+        }
+    }
+    return clblasSuccess ;
+}
+
+static inline clblasStatus checkEventWaitList(cl_uint numEventsInWaitList,
+                                              const cl_event *eventWaitList)
+{                                                                        
+    if (numEventsInWaitList>0 && eventWaitList == 0)
+    {
+        return clblasInvalidEventWaitList;
+    }
+
+    for (cl_uint i=0 ; i<numEventsInWaitList ; ++i)
+    {
+        if (eventWaitList[i] == 0)
+        {
+            return clblasInvalidEventWaitList;
+        }
+    }
+    return clblasSuccess ;
+}
+
+static inline clblasStatus checkMatrixA(DataType dtype, clblasOrder order, clblasTranspose trans, cl_mem mat, size_t size1, size_t size2, size_t off, size_t ld)
+{
+    return checkMatrixSizes(dtype, order, trans, size1, size2, mat, off, ld, A_MAT_ERRSET);
+}
+
+static inline clblasStatus checkMatrixB(DataType dtype, clblasOrder order, clblasTranspose trans, cl_mem mat, size_t size1, size_t size2, size_t off, size_t ld)
+{
+    return checkMatrixSizes(dtype, order, trans, size1, size2, mat, off, ld, B_MAT_ERRSET);
+}
+
+static inline clblasStatus checkMatrixC(DataType dtype, clblasOrder order, clblasTranspose trans, cl_mem mat, size_t size1, size_t size2, size_t off, size_t ld)
+{
+    return checkMatrixSizes(dtype, order, trans, size1, size2, mat, off, ld, C_MAT_ERRSET);
+}
+
+static inline clblasStatus checkVectorX(DataType dtype, size_t N, cl_mem X, size_t offx, int incx)
+{
+    return checkVectorSizes(dtype, N, X, offx, incx, X_VEC_ERRSET);
+}
+
+static inline clblasStatus checkVectorY(DataType dtype, size_t N, cl_mem Y, size_t offy, int incy)
+{
+    return checkVectorSizes(dtype, N, Y, offy, incy, Y_VEC_ERRSET);
+}
+
+#define RETURN_ON_ERROR(EXPR)          \
+    do {                               \
+       clblasStatus err = EXPR ;       \
+       if (err != clblasSuccess)       \
+           return err ;                \
+    } while(0);
+
+#define CHECK_QUEUES(numCommandQueues, commandQueues)                    \
+   RETURN_ON_ERROR( checkQueues(numCommandQueues, commandQueues) )
+
+#define CHECK_EVENTS(numEventsInWaitList, eventWaitList)                 \
+   RETURN_ON_ERROR( checkEventWaitList(numEventsInWaitList, eventWaitList) )
+
+#define CHECK_MATRIX_A(type, order, trans, mat, size1, size2, off, ld)   \
+    RETURN_ON_ERROR( checkMatrixA(type, order, trans, mat, size1, size2, off, ld) )
+
+#define CHECK_MATRIX_B(type, order, trans, mat, size1, size2, off, ld)   \
+    RETURN_ON_ERROR( checkMatrixB(type, order, trans, mat, size1, size2, off, ld) )
+
+#define CHECK_MATRIX_C(type, order, trans, mat, size1, size2, off, ld)   \
+    RETURN_ON_ERROR( checkMatrixC(type, order, trans, mat, size1, size2, off, ld) )
+
+#define CHECK_VECTOR_X(type, N, X, offx, incx)                           \
+    RETURN_ON_ERROR( checkVectorX(type, N, X, offx, incx) )
+
+#define CHECK_VECTOR_Y(type, N, Y, offy, incy)                           \
+   RETURN_ON_ERROR( checkVectorY(type, N, Y, offy, incy) )
+
+#endif // _CLBLAS_FUNCTOR_UTILS_
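As a usage note, these checks are intended to sit at the top of a BLAS entry point so
that invalid queues, wait lists or matrix descriptions cause an early return with the
matching clblasStatus. A hedged sketch follows; the function name and the TYPE_DOUBLE
constant are illustrative assumptions, not taken from this patch:

    // Sketch: argument validation at the start of a hypothetical DGEMM-like entry point.
    static clblasStatus validateDgemmArgs(clblasOrder order,
                                          clblasTranspose transA, clblasTranspose transB,
                                          size_t M, size_t N, size_t K,
                                          cl_mem A, size_t offA, size_t lda,
                                          cl_mem B, size_t offB, size_t ldb,
                                          cl_mem C, size_t offC, size_t ldc,
                                          cl_uint numCommandQueues, cl_command_queue *commandQueues,
                                          cl_uint numEventsInWaitList, const cl_event *eventWaitList)
    {
        CHECK_QUEUES(numCommandQueues, commandQueues);
        CHECK_EVENTS(numEventsInWaitList, eventWaitList);
        CHECK_MATRIX_A(TYPE_DOUBLE, order, transA, A, M, K, offA, lda);
        CHECK_MATRIX_B(TYPE_DOUBLE, order, transB, B, K, N, offB, ldb);
        CHECK_MATRIX_C(TYPE_DOUBLE, order, clblasNoTrans, C, M, N, offC, ldc);
        return clblasSuccess;
    }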
diff --git a/src/library/blas/functor/include/functor_xgemm.h b/src/library/blas/functor/include/functor_xgemm.h
new file mode 100644
index 0000000..8a365e4
--- /dev/null
+++ b/src/library/blas/functor/include/functor_xgemm.h
@@ -0,0 +1,213 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_XGEMM_H_
+#define _CLBLAS_FUNCTOR_XGEMM_H_
+
+//
+// This file provides the declarations of all XGEMM functors and related classes.
+//
+//
+//
+
+
+#include "functor.h"
+
+
+//
+// Base class for all XGEMM functors (DGEMM, SGEMM, ...) 
+//
+template <class T>
+class clblasXgemmFunctor : public clblasFunctor 
+{
+  public:
+
+    // Structure used to store all XGEMM arguments
+    struct Args
+    {
+        clblasOrder      order;
+        clblasTranspose  transA;
+        clblasTranspose  transB;
+        size_t           M;
+        size_t           N;
+        size_t           K;
+        T                alpha;
+        cl_mem           A;
+        size_t           offA;
+        size_t           lda;
+        cl_mem           B;
+        size_t           offB;
+        size_t           ldb;
+        T                beta;
+        cl_mem           C;
+        size_t           offC;
+        size_t           ldc;
+        cl_command_queue queue;
+        cl_uint          numEventsInWaitList;
+        const cl_event * eventWaitList;
+        cl_event *       events;
+
+        Args(clblasOrder order,
+             clblasTranspose transA,
+             clblasTranspose transB,
+             size_t M,
+             size_t N,
+             size_t K,
+             T alpha,
+             cl_mem A,
+             size_t offA,
+             size_t lda,
+             cl_mem B,
+             size_t offB,
+             size_t ldb,
+             T beta,
+             cl_mem C,
+             size_t offC,
+             size_t ldc,
+             cl_command_queue queue,
+             cl_uint numEventsInWaitList,
+             const cl_event *eventWaitList,
+             cl_event *events)
+            : order(order),
+              transA(transA),
+              transB(transB),
+              M(M),
+              N(N),
+              K(K),
+              alpha(alpha),
+              A(A),
+              offA(offA),
+              lda(lda),
+              B(B),
+              offB(offB),
+              ldb(ldb),
+              beta(beta),
+              C(C),
+              offC(offC),
+              ldc(ldc),
+              queue(queue),
+              numEventsInWaitList(numEventsInWaitList),
+              eventWaitList(eventWaitList),
+              events(events)
+        {
+        }
+    };
+
+public:
+
+    virtual clblasStatus execute(Args &args) = 0;
+
+} ;
+
+// ================  SGEMM ==================
+
+//
+// Base class for all functors providing a SGEMM implementation
+//
+class clblasSgemmFunctor : public clblasXgemmFunctor<cl_float>
+{
+};
+
+//
+// Fallback functor for SGEMM using the original solver mechanism
+//
+class clblasSgemmFunctorFallback : public clblasSgemmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasSgemmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasSgemmFunctorFallback * provide ();
+};
+
+
+
+// ================  DGEMM ==================
+
+//
+//
+// Base class for all functors providing a DGEMM implementation
+//
+class clblasDgemmFunctor : public clblasXgemmFunctor<cl_double>
+{
+};
+
+//
+// Fallback functor for DGEMM using the original solver mechanism
+//
+class clblasDgemmFunctorFallback : public clblasDgemmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasDgemmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasDgemmFunctorFallback * provide ();
+};
+
+
+// ================  CGEMM ==================
+
+//
+// Base class for all functors providing a CGEMM implementation
+//
+class clblasCgemmFunctor : public clblasXgemmFunctor<FloatComplex>
+{
+};
+
+//
+// Fallback functor for CGEMM using the original solver mechanism
+//
+class clblasCgemmFunctorFallback : public clblasCgemmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasCgemmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasCgemmFunctorFallback * provide ();
+};
+
+
+// ================  ZGEMM ==================
+
+//
+// Base class for all functors providing a ZGEMM implementation
+//
+class clblasZgemmFunctor : public clblasXgemmFunctor<DoubleComplex>
+{
+};
+
+//
+// Fallback functor for ZGEMM using the original solver mechanism
+//
+class clblasZgemmFunctorFallback : public clblasZgemmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasZgemmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasZgemmFunctorFallback * provide ();
+};
+
+
+#endif // _CLBLAS_FUNCTOR_XGEMM_H_
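For illustration, a minimal sketch of how a complete SGEMM call is packed into the Args
structure above and handed to a functor; the buffers, sizes and queue are placeholders:

    // Sketch: pack an SGEMM call into Args and run the fallback functor.
    cl_event event = NULL;
    clblasSgemmFunctor::Args args(clblasColumnMajor, clblasNoTrans, clblasNoTrans,
                                  M, N, K,
                                  1.0f, bufA, 0, lda,
                                        bufB, 0, ldb,
                                  0.0f, bufC, 0, ldc,
                                  queue, 0, NULL, &event);

    clblasSgemmFunctor * functor = clblasSgemmFunctorFallback::provide();
    clblasStatus status = functor->execute(args);
    functor->release();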
diff --git a/src/library/blas/functor/include/functor_xscal.h b/src/library/blas/functor/include/functor_xscal.h
new file mode 100644
index 0000000..5d12e32
--- /dev/null
+++ b/src/library/blas/functor/include/functor_xscal.h
@@ -0,0 +1,207 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_XSCAL_H_
+#define _CLBLAS_FUNCTOR_XSCAL_H_
+
+//
+// This file provides the declarations of all XSCAL functors and related classes.
+//
+//
+//
+
+
+#include <functor.h>
+
+
+//
+// Base class for all XSCAL functors (DSCAL, SSCAL, ...) 
+//
+template<typename TX, typename Talpha> 
+class clblasXscalFunctor : public clblasFunctor 
+{
+public:
+
+  // Structure used to store all XSCAL arguments
+  struct Args
+  {
+    size_t           N;
+    Talpha           alpha;
+    cl_mem           X;
+    size_t           offx;
+    int              incx;
+    cl_command_queue queue;
+    cl_uint          numEventsInWaitList;
+    const cl_event * eventWaitList;
+    cl_event *       events;
+  
+    Args(size_t N,
+         Talpha alpha,
+         cl_mem X,
+         size_t offx,
+         int    incx,
+         cl_command_queue queue,
+         cl_uint          numEventsInWaitList,
+         const cl_event * eventWaitList,
+         cl_event *       events) 
+      : N(N),
+        alpha(alpha),
+        X(X),
+        offx(offx),
+        incx(incx),
+        queue(queue),
+        numEventsInWaitList(numEventsInWaitList),
+        eventWaitList(eventWaitList),
+        events(events)
+    {
+    }
+  };
+
+  virtual clblasStatus execute(Args & args) = 0;
+};
+
+
+
+//
+// Base class for all functors providing a SSCAL implementation
+//
+class clblasSscalFunctor: public clblasXscalFunctor<cl_float, cl_float>
+{
+};
+
+//
+// Base class for all functors providing a DSCAL implementation
+//
+class clblasDscalFunctor: public clblasXscalFunctor<cl_double, cl_double>
+{
+};
+
+//
+// Base class for all functors providing a CSCAL implementation
+//
+class clblasCscalFunctor: public clblasXscalFunctor<cl_float2, cl_float2>
+{
+};
+
+//
+// Base class for all functors providing a ZSCAL implementation
+//
+class clblasZscalFunctor: public clblasXscalFunctor<cl_double2, cl_double2>
+{
+};
+
+//
+// Base class for all functors providing a CSSCAL implementation
+//
+class clblasCsscalFunctor: public clblasXscalFunctor<cl_float2, cl_float>
+{
+};
+
+//
+// Base class for all functors providing a ZDSCAL implementation
+//
+class clblasZdscalFunctor: public clblasXscalFunctor<cl_double2, cl_double>
+{
+};
+
+
+
+//
+// Fallback functor for SSCAL : implement the sscal using the old solver mechanism
+//
+class clblasSscalFunctorFallback : public clblasSscalFunctor 
+{
+  public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+  public:  // Inherited members from clblasSscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasSscalFunctorFallback * provide ();
+};
+
+//
+// Fallback functor for DSCAL : implement the dscal using the old solver mechanism
+//
+class clblasDscalFunctorFallback : public clblasDscalFunctor 
+{
+  public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+  public:  // Inherited members from clblasDscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasDscalFunctorFallback * provide ();
+};
+
+//
+// Fallback functor for CSCAL : implement the Cscal using the old solver mechanism
+//
+class clblasCscalFunctorFallback : public clblasCscalFunctor 
+{
+  public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+  public:  // Inherited members from clblasCscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasCscalFunctorFallback * provide ();
+};
+
+//
+// Fallback functor for ZSCAL : implement the zscal using the old solver mechanism
+//
+class clblasZscalFunctorFallback : public clblasZscalFunctor 
+{
+  public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+  public:  // Inherited members from clblasZscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasZscalFunctorFallback * provide ();
+};
+
+//
+// Fallback functor for CSSCAL : implement the Csscal using the old solver mechanism
+//
+class clblasCsscalFunctorFallback : public clblasCsscalFunctor 
+{
+  public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+  public:  // Inherited members from clblasCsscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasCsscalFunctorFallback * provide ();
+};
+
+//
+// Fallback functor for ZDSCAL : implement the zdscal using the old solver mechanism
+//
+class clblasZdscalFunctorFallback : public clblasZdscalFunctor 
+{
+  public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+  public:  // Inherited members from clblasZdscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasZdscalFunctorFallback * provide ();
+};
+
+
+#endif // _CLBLAS_FUNCTOR_XSCAL_H_
diff --git a/src/library/blas/functor/include/functor_xscal_generic.h b/src/library/blas/functor/include/functor_xscal_generic.h
new file mode 100644
index 0000000..ae77392
--- /dev/null
+++ b/src/library/blas/functor/include/functor_xscal_generic.h
@@ -0,0 +1,173 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_XSCAL_GENERIC_H_
+#define _CLBLAS_FUNCTOR_XSCAL_GENERIC_H_
+
+#include <functor_xscal.h>
+
+
+// A POD type used to index the functors below
+struct _clblasXscalFunctorGenericData 
+{
+  int  vecLen  ;
+  bool doVLOAD ;
+  bool noUnity ;
+  // operator< is needed for the cache 
+  bool operator<(const _clblasXscalFunctorGenericData &b) const 
+  {
+    const  _clblasXscalFunctorGenericData &a = *this ;    
+    if ( a.vecLen  != b.vecLen  ) return a.vecLen  < b.vecLen  ;
+    if ( a.doVLOAD != b.doVLOAD ) return a.doVLOAD < b.doVLOAD ;
+    if ( a.noUnity != b.noUnity ) return a.noUnity < b.noUnity ;
+    return false ;
+  }  
+} ;
+
+//
+// Generic functor for SSCAL : implement the sscal using the kprintf generator
+//
+class clblasSscalFunctorGeneric : public clblasSscalFunctor 
+{ 
+  public: 
+    typedef _clblasXscalFunctorGenericData Data ;  
+    Data data;
+  public:  // Constructor & Destructor
+    clblasSscalFunctorGeneric(cl_context ctxt, cl_device_id dev, const Data & data, cl_int & err);
+    ~clblasSscalFunctorGeneric();
+  public:  // Inherited members from clblasSscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasSscalFunctorGeneric * provide (Args & a);
+  public:
+    typedef clblasFunctorCache<clblasSscalFunctorGeneric, Data> Cache;
+    static Cache cache;
+  public:  
+    cl_program program;
+};
+
+//
+// Generic functor for DSCAL : implement the dscal using the kprintf generator
+//
+class clblasDscalFunctorGeneric : public clblasDscalFunctor 
+{
+  public: 
+    typedef _clblasXscalFunctorGenericData Data ;  
+    Data data;
+  public:  // Constructor & Destructor
+    clblasDscalFunctorGeneric(cl_context ctxt, cl_device_id dev, const Data & data, cl_int & err);
+    ~clblasDscalFunctorGeneric();
+  public:  // Inherited members from clblasDscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasDscalFunctorGeneric * provide (Args & a);
+  public:
+    typedef clblasFunctorCache<clblasDscalFunctorGeneric, Data> Cache;
+    static Cache cache;
+  public:  
+    cl_program program;
+};
+
+//
+// Generic functor for CSCAL : implement the Cscal using the kprintf generator
+//
+class clblasCscalFunctorGeneric : public clblasCscalFunctor 
+{
+  public: 
+    typedef _clblasXscalFunctorGenericData Data ;  
+    Data data;
+  public:  // Constructor & Destructor
+    clblasCscalFunctorGeneric(cl_context ctxt, cl_device_id dev, const Data & data, cl_int & err);
+    ~clblasCscalFunctorGeneric();
+  public:  // Inherited members from clblasCscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasCscalFunctorGeneric * provide (Args & a);
+  public:
+    typedef clblasFunctorCache<clblasCscalFunctorGeneric, Data> Cache;
+    static Cache cache;
+  public:  
+    cl_program program;
+};
+
+//
+// Generic functor for ZSCAL : implement the zscal using the kprintf generator
+//
+class clblasZscalFunctorGeneric : public clblasZscalFunctor 
+{
+  public: 
+    typedef _clblasXscalFunctorGenericData Data ;  
+    Data data;
+  public:  // Constructor & Destructor
+    clblasZscalFunctorGeneric(cl_context ctxt, cl_device_id dev, const Data & data, cl_int & err);
+    ~clblasZscalFunctorGeneric();
+  public:  // Inherited members from clblasZscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasZscalFunctorGeneric * provide (Args & a);
+  public:
+    typedef clblasFunctorCache<clblasZscalFunctorGeneric, Data> Cache;
+    static Cache cache;
+  public:  
+    cl_program program;
+};
+
+//
+// Generic functor for CSSCAL : implement the Csscal using the kprintf generator
+//
+class clblasCsscalFunctorGeneric : public clblasCsscalFunctor 
+{
+  public: 
+    typedef _clblasXscalFunctorGenericData Data ;  
+    Data data;
+  public:  // Constructor & Destructor
+    clblasCsscalFunctorGeneric(cl_context ctxt, cl_device_id dev, const Data & data, cl_int & err);
+    ~clblasCsscalFunctorGeneric();
+  public:  // Inherited members from clblasCsscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasCsscalFunctorGeneric * provide (Args & a);
+  public:
+    typedef clblasFunctorCache<clblasCsscalFunctorGeneric, Data> Cache;
+    static Cache cache;
+  public:  
+    cl_program program;
+};
+
+//
+// Generic functor for ZDSCAL : implement the zdscal using the kprintf generator
+//
+class clblasZdscalFunctorGeneric : public clblasZdscalFunctor 
+{
+  public: 
+    typedef _clblasXscalFunctorGenericData Data ;  
+    Data data;
+  public:  // Constructor & Destructor
+    clblasZdscalFunctorGeneric(cl_context ctxt, cl_device_id dev, const Data & data, cl_int & err);
+    ~clblasZdscalFunctorGeneric();
+  public:  // Inherited members from clblasZdscalFunctor 
+    virtual clblasStatus execute(Args & a);
+  public:
+    static clblasZdscalFunctorGeneric * provide (Args & a);
+  public:
+    typedef clblasFunctorCache<clblasZdscalFunctorGeneric, Data> Cache;
+    static Cache cache;
+  public:  
+    cl_program program;
+};
+
+
+#endif // _CLBLAS_FUNCTOR_XSCAL_GENERIC_H_
diff --git a/src/library/blas/functor/include/functor_xtrsm.h b/src/library/blas/functor/include/functor_xtrsm.h
new file mode 100644
index 0000000..181a46b
--- /dev/null
+++ b/src/library/blas/functor/include/functor_xtrsm.h
@@ -0,0 +1,203 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#ifndef _CLBLAS_FUNCTOR_XTRSM_H_
+#define _CLBLAS_FUNCTOR_XTRSM_H_
+
+//
+// This file provides the declarations of all XTRSM functors and related classes.
+//
+//
+//
+
+
+#include "functor.h"
+
+
+//
+// Base class for all XTRSM functors (DTRSM, STRSM, ...) 
+//
+template <class T>
+class clblasXtrsmFunctor : public clblasFunctor 
+{
+  public:
+
+    // Structure used to store all XTRSM arguments
+    struct Args
+    {
+
+      clblasOrder order;
+      clblasSide side;
+      clblasUplo uplo;
+      clblasTranspose transA;
+      clblasDiag diag;
+      size_t M;
+      size_t N;
+      T alpha;
+      cl_mem A;
+      size_t offA;
+      size_t lda;
+      cl_mem B;
+      size_t offB;
+      size_t ldb;
+      cl_command_queue queue;
+      cl_uint          numEventsInWaitList;
+      const cl_event * eventWaitList;
+      cl_event *       events;
+
+      Args(clblasOrder order,
+           clblasSide side,
+           clblasUplo uplo,
+           clblasTranspose transA,
+           clblasDiag diag,
+           size_t M,
+           size_t N,
+           T alpha,
+           cl_mem A,
+           size_t offA,
+           size_t lda,
+           cl_mem B,
+           size_t offB,
+           size_t ldb,
+           cl_command_queue queue,
+           cl_uint          numEventsInWaitList,
+           const cl_event * eventWaitList,
+           cl_event *       events)
+         : order(order),
+           side(side),
+           uplo(uplo),
+           transA(transA),
+           diag(diag),
+           M(M),
+           N(N),
+           alpha(alpha),
+           A(A),
+           offA(offA),
+           lda(lda),
+           B(B),
+           offB(offB),
+           ldb(ldb),
+           queue(queue),
+           numEventsInWaitList(numEventsInWaitList),
+           eventWaitList(eventWaitList),
+           events(events)
+           
+        {
+        }
+    };
+
+public:
+
+    virtual clblasStatus execute(Args &args) = 0;
+
+} ;
+
+// ================  STRSM ==================
+
+//
+// Base class for all functors providing a STRSM implementation
+//
+class clblasStrsmFunctor : public clblasXtrsmFunctor<cl_float>
+{
+};
+
+//
+// Fallback functor for STRSM using the original solver mechanism
+//
+class clblasStrsmFunctorFallback : public clblasStrsmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasStrsmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasStrsmFunctorFallback * provide ();
+};
+
+
+// ================  DTRSM ==================
+
+//
+// Base class for all functors providing a DTRSM implementation
+//
+class clblasDtrsmFunctor : public clblasXtrsmFunctor<cl_double>
+{
+};
+
+//
+// Fallback functor for DTRSM using the original solver mechanism
+//
+class clblasDtrsmFunctorFallback : public clblasDtrsmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasDtrsmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasDtrsmFunctorFallback * provide ();
+};
+
+
+// ================  CTRSM ==================
+
+//
+// Base class for all functors providing a CTRSM implementation
+//
+class clblasCtrsmFunctor : public clblasXtrsmFunctor<FloatComplex>
+{
+};
+
+//
+// Fallback functor for CTRSM using the original solver mechanism
+//
+class clblasCtrsmFunctorFallback : public clblasCtrsmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasCtrsmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasCtrsmFunctorFallback * provide ();
+};
+
+// ================  ZTRSM ==================
+
+//
+// Base class for all functors providing a ZTRSM implementation
+//
+class clblasZtrsmFunctor : public clblasXtrsmFunctor<DoubleComplex>
+{
+};
+
+//
+// Fallback functor for ZTRSM using the original solver mechanism
+//
+class clblasZtrsmFunctorFallback : public clblasZtrsmFunctor 
+{
+public:   // Inherited members from clblasFunctor 
+    virtual void retain();  
+    virtual void release();
+ public:  // Inherited members from clblasZtrsmFunctor 
+    virtual clblasStatus execute(Args & a);
+ public:
+    static clblasZtrsmFunctorFallback * provide ();
+};
+
+
+#endif // _CLBLAS_FUNCTOR_XTRSM_H_
diff --git a/src/library/blas/functor/include/gcn_dgemm.h b/src/library/blas/functor/include/gcn_dgemm.h
new file mode 100644
index 0000000..a11b619
--- /dev/null
+++ b/src/library/blas/functor/include/gcn_dgemm.h
@@ -0,0 +1,59 @@
+#ifndef CLBLASDGEMMFUNCTORGCN
+#define CLBLASDGEMMFUNCTORGCN
+#include <string>
+
+class clblasDgemmFunctorGCN : public clblasDgemmFunctor 
+{
+public:
+  
+  //
+  // A structure that describes a kernel variant. 
+  //
+  // All instances of this structure shall be const and static because
+  // their addresses are used as keys in the internal functor cache.
+  //
+  // Also, each of them shall have a unique kernel name.
+  // 
+  struct Variant 
+  {    
+    const char *    kernel_name ;  
+    const char *    source ;   // the kernel source (shall be unique)
+    const char *    build_options;
+    const char *    bin ; 
+    size_t          bin_size ; 
+    clblasTranspose transA ;   //
+    clblasTranspose transB ;   //
+    unsigned        divN ;     // Required divisor of N  (use 1 when N can be of any value) 
+    unsigned        divM ;     // Required divisor of M  (use 1 when M can be of any value) 
+    unsigned        divK ;     // Required divisor of K  (use 1 when K can be of any value) 
+    size_t          ls[2]  ;   // Local size (the work-group size)
+    size_t          bwi[2] ;   // Block size work-item:  Number of elements calculated by each work item
+                               // So basically each kernel is computing a block of
+                               //   (ls[0]*bwi[0]) x (ls[1]*bwi[1])  
+                               // elements of C. 
+    std::string mult;
+    
+  } ;
+  
+private:  // Constructor & Destructor
+
+  //clblasDgemmFunctorGCN(Args & args, const Variant * variant, cl_int & err) ;
+  
+public:
+
+  // Provide a suitable clblasDgemmFunctorGCN for the specified args
+  // or NULL if none 
+  //static clblasDgemmFunctorGCN * provide(clblasDgemmFunctor::Args & args, const char* DevName) ;
+
+public: // inherited member from clblasDgemmFunctor
+
+  virtual clblasStatus execute(Args &args) ;
+
+protected:
+
+  cl_program      m_program ;  
+  const Variant * m_variant ; // Pointer to a 'const static' object describing the kernel variant. 
+} ; 
+
+#endif
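A short sketch of how the divisor and transpose fields of a Variant are meant to be used
when deciding whether a kernel variant applies to a given call; the helper name is an
illustrative assumption, not part of the patch:

    // Sketch: does this kernel variant apply to the given DGEMM arguments?
    static bool variantApplies(const clblasDgemmFunctorGCN::Variant & v,
                               const clblasDgemmFunctor::Args & args)
    {
        return v.transA == args.transA
            && v.transB == args.transB
            && (args.M % v.divM) == 0     // divM == 1 accepts any M
            && (args.N % v.divN) == 0
            && (args.K % v.divK) == 0;
    }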
diff --git a/src/library/blas/functor/include/gcn_dgemmCommon.h b/src/library/blas/functor/include/gcn_dgemmCommon.h
new file mode 100644
index 0000000..533b1ef
--- /dev/null
+++ b/src/library/blas/functor/include/gcn_dgemmCommon.h
@@ -0,0 +1,22 @@
+#ifndef DGEMMMGCNCOMMON
+#define DGEMMMGCNCOMMON
+
+#include "gcn_dgemm.h"
+
+
+class clBlasGCNdgemmCommonFunctor  : public clblasDgemmFunctorGCN 
+{
+
+  private:  // Constructor & Destructor
+
+  clBlasGCNdgemmCommonFunctor(Args & args, const Variant * variant, cl_int & err) ;
+  
+public:
+
+  // Provide a suitable clBlasGCNdgemmCommonFunctor for the specified args
+  // or NULL if none 
+  static clBlasGCNdgemmCommonFunctor * provide(clblasDgemmFunctor::Args & args, const char* DevName) ;
+
+};
+
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/include/gcn_dgemmSmallMatrices.h b/src/library/blas/functor/include/gcn_dgemmSmallMatrices.h
new file mode 100644
index 0000000..126de5b
--- /dev/null
+++ b/src/library/blas/functor/include/gcn_dgemmSmallMatrices.h
@@ -0,0 +1,27 @@
+#ifndef GCN_DGEMMMSMALLMATRICES
+#define GCN_DGEMMMSMALLMATRICES
+
+#include "gcn_dgemm.h"
+
+
+class clBlasGCNDgemmSmallMatricesFunctor  : public clblasDgemmFunctorGCN 
+{
+public:
+
+
+
+  private:  // Constructor & Destructor
+
+  clBlasGCNDgemmSmallMatricesFunctor(Args & args, const Variant * variant, cl_int & err) ;
+  //cl_int KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[4], Args &args);
+  
+public:
+
+  // Provide a suitable clBlasGCNDgemmSmallMatricesFunctor for the specified args
+  // or NULL if none 
+  static clBlasGCNDgemmSmallMatricesFunctor * provide(clblasDgemmFunctor::Args & args, const char* DevName) ;
+  virtual clblasStatus execute(Args &args) ;
+
+};
+
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/include/gcn_sgemm.h b/src/library/blas/functor/include/gcn_sgemm.h
new file mode 100644
index 0000000..d934c62
--- /dev/null
+++ b/src/library/blas/functor/include/gcn_sgemm.h
@@ -0,0 +1,62 @@
+#ifndef CLBLASSGEMMFUNCTORGCN
+#define CLBLASSGEMMFUNCTORGCN
+#include <string>
+
+class clblasSgemmFunctorGCN : public clblasSgemmFunctor 
+{
+public:
+  
+  //
+  // A structure that describes a kernel variant. 
+  //
+  // All instances of this structure shall be const and static because
+  // their addresses are used as keys in the internal functor cache.
+  //
+  // Also, each of them shall have a unique kernel name.
+  // 
+  struct Variant 
+  {    
+    const char *    kernel_name ;  
+    const char *    source ;   // the kernel source (shall be unique)
+    const char *    build_options;
+    const char *    bin ; 
+    size_t          bin_size ; 
+    clblasTranspose transA ;   //
+    clblasTranspose transB ;   //
+    unsigned        divN ;     // Required divisor of N  (use 1 when N can be of any value) 
+    unsigned        divM ;     // Required divisor of M  (use 1 when M can be of any value) 
+    unsigned        divK ;     // Required divisor of K  (use 1 when K can be of any value) 
+    size_t          ls[2]  ;   // Local size (the work-group size)
+    size_t          bwi[2] ;   // Block size work-item:  Number of elements calculated by each work item
+                               // So basically each kernel is computing a block of
+                               //   (ls[0]*bwi[0]) x (ls[1]*bwi[1])  
+                               // elements of C. 
+    std::string mult;
+  } ;
+  
+private:  // Constructor & Destructor
+
+  clblasSgemmFunctorGCN(Args & args, const Variant * variant, cl_int & err) ;
+  
+public:
+
+  // Provide a suitable clblasSgemmFunctorGCN for the specified args
+  // or NULL if none 
+  static clblasSgemmFunctorGCN * provide(clblasSgemmFunctor::Args & args, const char* DevName) ;
+
+public: // inherited member from clblasSgemmFunctor
+
+  virtual clblasStatus execute(Args &args) ;
+
+protected:
+  // We need a default constructor because this class is derived from,
+  // but the specific constructor cannot be used since the arguments differ (variant).
+  // It may be worth revisiting this class to make it cleaner.
+  clblasSgemmFunctorGCN(){};
+  cl_program      m_program ;
+protected:
+  const Variant * m_variant ; // Pointer to a 'const static' object describing the kernel variant. 
+} ; 
+
+#endif
diff --git a/src/library/blas/functor/include/gcn_sgemmSmallMatrices.h b/src/library/blas/functor/include/gcn_sgemmSmallMatrices.h
new file mode 100644
index 0000000..91cec9e
--- /dev/null
+++ b/src/library/blas/functor/include/gcn_sgemmSmallMatrices.h
@@ -0,0 +1,27 @@
+#ifndef GCN_SGEMMMSMALLMATRICES
+#define GCN_SGEMMMSMALLMATRICES
+
+#include "gcn_sgemm.h"
+
+
+class clBlasGCNSgemmSmallMatricesFunctor  : public clblasSgemmFunctorGCN 
+{
+public:
+
+
+
+  private:  // Constructor & Destructor
+
+  clBlasGCNSgemmSmallMatricesFunctor(Args & args, const Variant * variant, cl_int & err) ;
+  //cl_int KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[4], Args &args);
+  
+public:
+
+  // Provide a suitable clBlasGCNSgemmSmallMatricesFunctor for the specified args
+  // or NULL if none 
+  static clBlasGCNSgemmSmallMatricesFunctor * provide(clblasSgemmFunctor::Args & args, const char* DevName) ;
+  virtual clblasStatus execute(Args &args) ;
+
+};
+
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/include/gpu_dtrsm.h b/src/library/blas/functor/include/gpu_dtrsm.h
new file mode 100644
index 0000000..af39c1c
--- /dev/null
+++ b/src/library/blas/functor/include/gpu_dtrsm.h
@@ -0,0 +1,28 @@
+#ifndef _CLBLAS_DTRSM_FUNCTOR_GPU_H_
+#define _CLBLAS_DTRSM_FUNCTOR_GPU_H_
+
+class clblasDtrsmFunctorGpu : public clblasDtrsmFunctor 
+{
+public:
+  
+    
+private:  // Constructor & Destructor
+
+  clblasDtrsmFunctorGpu(Args & args, cl_int & err, const char* DevName,  cl_uint _64BitsUse) ;
+  
+public:
+
+  // Provide a suitable clblasDtrsmFunctorGpu for the specified args
+  // or NULL if none 
+  static clblasDtrsmFunctorGpu * provide(clblasDtrsmFunctor::Args & args, const char* DevName) ;
+
+public: // inherited member from clblasDtrsmFunctor
+
+  virtual clblasStatus execute(Args &args) ;
+
+private:
+
+  cl_program      m_program ;  
+} ; 
+
+#endif
diff --git a/src/library/blas/functor/include/hawaii.h b/src/library/blas/functor/include/hawaii.h
new file mode 100644
index 0000000..b75ef63
--- /dev/null
+++ b/src/library/blas/functor/include/hawaii.h
@@ -0,0 +1,41 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#ifndef _CLBLAS_FUNCTION_SELECTOR_HAWAII_
+#define _CLBLAS_FUNCTION_SELECTOR_HAWAII_
+
+#include <functor_selector.h>
+//#include <functor_hawaii_dgemm_NT_MN48.h>
+
+class FunctorSelectorHawaii : public clblasFunctorSelector
+{
+private:
+    FunctorSelectorHawaii();
+
+    static FunctorSelectorHawaii instance;
+
+public:
+    // Hawaii-specific selection of DGEMM, SGEMM and DTRSM functors for the given arguments
+    virtual clblasDgemmFunctor * select_dgemm_specific(clblasDgemmFunctor::Args & args);
+	virtual clblasSgemmFunctor * select_sgemm_specific(clblasSgemmFunctor::Args & args);
+    virtual clblasDtrsmFunctor * select_dtrsm_specific(clblasDtrsmFunctor::Args & args);
+
+};
+
+
+#endif // _CLBLAS_FUNCTION_SELECTOR_HAWAII_
diff --git a/src/library/blas/functor/include/hawaii_dgemmChannelConflict.h b/src/library/blas/functor/include/hawaii_dgemmChannelConflict.h
new file mode 100644
index 0000000..3df01af
--- /dev/null
+++ b/src/library/blas/functor/include/hawaii_dgemmChannelConflict.h
@@ -0,0 +1,22 @@
+#ifndef HAWAII_DGEMMMCHANNELCONFLICT
+#define HAWAII_DGEMMMCHANNELCONFLICT
+
+#include "gcn_dgemm.h"
+
+
+class clBlashawaiiDgemmChannelConflictFunctor  : public clblasDgemmFunctorGCN 
+{
+
+  private:  // Constructor & Destructor
+
+  clBlashawaiiDgemmChannelConflictFunctor(Args & args, const Variant * variant, cl_int & err) ;
+  
+public:
+
+  // Provide a suitable hawaii_dgemmChannelConflict for the specified args
+  // or NULL if none 
+  static clBlashawaiiDgemmChannelConflictFunctor * provide(clblasDgemmFunctor::Args & args) ;
+
+};
+
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/include/hawaii_dgemmSplitKernel.h b/src/library/blas/functor/include/hawaii_dgemmSplitKernel.h
new file mode 100644
index 0000000..e69d552
--- /dev/null
+++ b/src/library/blas/functor/include/hawaii_dgemmSplitKernel.h
@@ -0,0 +1,46 @@
+#ifndef HAWAII_DGEMMMSPLITKERNEL
+#define HAWAII_DGEMMMSPLITKERNEL
+
+#include "gcn_dgemm.h"
+
+
+class clBlashawaiiDgemmSplitKernelFunctor  : public clblasDgemmFunctorGCN 
+{
+public:
+  struct Variant 
+  {    
+    const char *    variantName;
+    const char *    kernel_name[4] ;  //order is main, row, column, single
+    const char *    source ;   // the kernel source (shall be unique)
+    const char *    build_options;
+    const char *    bin ; 
+    size_t          bin_size ; 
+    clblasTranspose transA ;   //
+    clblasTranspose transB ;   //
+    unsigned        divK ;     // Required divisor of K  (use 1 when K can be of any value)
+    size_t          ls[2]  ;   // Local size (the work-group size)
+    size_t          bwi[2] ;   // Block size work-item:  Number of elements calculated by each work item
+                               // So basically each kernel is computing a block of
+                               //   (ls[0]*bwi[0]) x (ls[1]*bwi[1])  
+                               // elements of C. 
+    std::string mult;
+    
+  } ;
+
+
+  private:  // Constructor & Destructor
+
+  clBlashawaiiDgemmSplitKernelFunctor(Args & args, const Variant * variant, cl_int & err) ;
+  cl_int KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[4], Args &args);
+  const Variant * m_variantSplit ; // Pointer to a 'const static' object describing the kernel variant. 
+  
+public:
+
+  // Provide a suitable clBlashawaiiDgemmSplitKernelFunctor for the specified args
+  // or NULL if none 
+  static clBlashawaiiDgemmSplitKernelFunctor * provide(clblasDgemmFunctor::Args & args) ;
+  virtual clblasStatus execute(Args &args) ;
+
+};
+
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/include/hawaii_sgemmBranchKernel.h b/src/library/blas/functor/include/hawaii_sgemmBranchKernel.h
new file mode 100644
index 0000000..c6ff9f4
--- /dev/null
+++ b/src/library/blas/functor/include/hawaii_sgemmBranchKernel.h
@@ -0,0 +1,50 @@
+/*
+Handles SGEMM sizes that are not multiples of 16, 32, 48, 64, 94 in one kernel.
+Only non-multiples of 32 (NT) are implemented right now.
+*/
+#ifndef HAWAII_SGEMMBRANCHKERNEL
+#define HAWAII_SGEMMBRANCHKERNEL
+
+#include "gcn_sgemm.h"
+
+
+class clBlashawaiiSgemmBranchKernelFunctor  : public clblasSgemmFunctorGCN 
+{
+public:
+  struct Variant 
+  {    
+    const char *    variantName;
+    const char *    kernel_name[1] ;  //just one kernel here
+    const char *    source ;   // the kernel source (shall be unique)
+    const char *    build_options;
+    const char *    bin ; 
+    size_t          bin_size ; 
+    clblasTranspose transA ;   //
+    clblasTranspose transB ;   //
+    unsigned        divK ;     // Required divisor of K  (use 1 when K can be of any value)
+    size_t          ls[2]  ;   // Local size (the work-group size)
+    size_t          bwi[2] ;   // Block size work-item:  Number of elements calculated by each work item
+                               // So basically each kernel is computing a block of
+                               //   (ls[0]*bwi[0]) x (ls[1]*bwi[1])  
+                               // elements of C. 
+    std::string mult;
+    
+  } ;
+
+
+  private:  // Constructor & Destructor
+
+  clBlashawaiiSgemmBranchKernelFunctor(Args & args, const Variant * variant, cl_int & err);
+  cl_int KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[1], Args &args);
+  const Variant * m_variantBranch ; // Pointer to a 'const static' object describing the kernel variant. 
+  
+public:
+
+  // Provide a suitable clBlashawaiiSgemmBranchKernelFunctor for the specified args
+  // or NULL if none 
+  static clBlashawaiiSgemmBranchKernelFunctor * provide(clblasSgemmFunctor::Args & args,  char* DevName) ;
+  virtual clblasStatus execute(Args &args) ;
+
+};
+
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/include/hawaii_sgemmSplitKernel.h b/src/library/blas/functor/include/hawaii_sgemmSplitKernel.h
new file mode 100644
index 0000000..0168845
--- /dev/null
+++ b/src/library/blas/functor/include/hawaii_sgemmSplitKernel.h
@@ -0,0 +1,46 @@
+#ifndef HAWAII_SGEMMMSPLITKERNEL
+#define HAWAII_SGEMMMSPLITKERNEL
+
+#include "gcn_sgemm.h"
+
+
+class clBlashawaiiSgemmSplitKernelFunctor  : public clblasSgemmFunctorGCN 
+{
+public:
+  struct Variant 
+  {    
+    const char *    variantName;
+    const char *    kernel_name[4] ;  //order is main, row, column, single
+    const char *    source ;   // the kernel source (shall be unique)
+    const char *    build_options;
+    const char *    bin ; 
+    size_t          bin_size ; 
+    clblasTranspose transA ;   //
+    clblasTranspose transB ;   //
+    unsigned        divK ;     // Required divisor of K  (use 1 when K can be of any value)
+    size_t          ls[2]  ;   // Local size (the work-group size)
+    size_t          bwi[2] ;   // Block size work-item:  Number of elements calculated by each work item
+                               // So basically each kernel is computing a block of
+                               //   (ls[0]*bwi[0]) x (ls[1]*bwi[1])  
+                               // elements of C. 
+    std::string mult;
+    
+  } ;
+
+
+  private:  // Constructor & Destructor
+
+  clBlashawaiiSgemmSplitKernelFunctor(Args & args, const Variant * variant, cl_int & err) ;
+  cl_int KernelsLaunch(cl_command_queue queue, cl_kernel Kernel[4], Args &args);
+  const Variant * m_variantSplit ; // Pointer to a 'const static' object describing the kernel variant. 
+  
+public:
+
+  // Provide a suitable clBlashawaiiSgemmSplitKernelFunctor for the specified args
+  // or NULL if none 
+  static clBlashawaiiSgemmSplitKernelFunctor * provide(clblasSgemmFunctor::Args & args,  char* DevName) ;
+  virtual clblasStatus execute(Args &args) ;
+
+};
+
+#endif
\ No newline at end of file
diff --git a/src/library/blas/functor/include/tahiti.h b/src/library/blas/functor/include/tahiti.h
new file mode 100644
index 0000000..73a4afc
--- /dev/null
+++ b/src/library/blas/functor/include/tahiti.h
@@ -0,0 +1,41 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#ifndef _CLBLAS_FUNCTION_SELECTOR_TAHITI_
+#define _CLBLAS_FUNCTION_SELECTOR_TAHITI_
+
+#include <functor_selector.h>
+//#include <functor_hawaii_dgemm_NT_MN48.h>
+
+class FunctorSelectorTahiti : public clblasFunctorSelector
+{
+private:
+    FunctorSelectorTahiti();
+
+    static FunctorSelectorTahiti instance;
+
+public:
+    // Tahiti-specific selection of DGEMM, DTRSM and SGEMM functors for the given arguments
+    virtual clblasDgemmFunctor * select_dgemm_specific(clblasDgemmFunctor::Args & args);
+    virtual clblasDtrsmFunctor * select_dtrsm_specific(clblasDtrsmFunctor::Args & args);
+    virtual clblasSgemmFunctor * select_sgemm_specific(clblasSgemmFunctor::Args & args);
+
+};
+
+
+#endif // _CLBLAS_FUNCTION_SELECTOR_TAHITI_
diff --git a/src/library/blas/functor/tahiti.cc b/src/library/blas/functor/tahiti.cc
new file mode 100644
index 0000000..03392f1
--- /dev/null
+++ b/src/library/blas/functor/tahiti.cc
@@ -0,0 +1,120 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <tahiti.h>
+#include "gpu_dtrsm.h"
+//#include "gcn_dgemm.h"
+#include "gcn_dgemmCommon.h"
+#include "gcn_dgemmSmallMatrices.h"
+#include "gcn_sgemmSmallMatrices.h"
+
+
+FunctorSelectorTahiti FunctorSelectorTahiti::instance ;
+
+
+FunctorSelectorTahiti::FunctorSelectorTahiti()
+    : clblasFunctorSelector(TAHITI)
+{
+    
+}
+
+//
+// The selector function for DGEMM on tahiti 
+//
+//
+clblasDgemmFunctor * FunctorSelectorTahiti::select_dgemm_specific(clblasDgemmFunctor::Args & args)
+{
+#ifdef CLBLAS_TAHITI_DYNAMIC_KERNEL
+	return this->clblasFunctorSelector::select_dgemm_specific(args);
+#else
+  clblasDgemmFunctor * functor;
+
+  bool NN_NT = ((args.transA==clblasNoTrans && args.transB==clblasTrans) || (args.transA==clblasNoTrans && args.transB==clblasNoTrans));
+  bool SmallMatrices = args.M/6*args.N/6<85*85;
+  SmallMatrices= SmallMatrices && ((args.M%24==0&&args.N%24==0)||(args.M%16==0&&args.N%16==0))&&args.K%8==0 &&NN_NT;
+
+
+  if (args.alpha!=0)
+  {
+    if (SmallMatrices)
+    {
+      functor = clBlasGCNDgemmSmallMatricesFunctor::provide(args,  "Tahiti");
+      if (functor) 
+        return functor;
+    }
+    
+
+    functor = clBlasGCNdgemmCommonFunctor::provide(args, "Tahiti");
+    if (functor) 
+      return functor;
+    
+  }
+  // else use the fallback implementation
+  return this->clblasFunctorSelector::select_dgemm_specific(args);
+#endif
+
+}
+
+
+// The selector function for DTRSM on tahiti 
+//
+clblasDtrsmFunctor * FunctorSelectorTahiti::select_dtrsm_specific(clblasDtrsmFunctor::Args & args)
+{
+#ifdef CLBLAS_TAHITI_DYNAMIC_KERNEL
+	return this->clblasFunctorSelector::select_dtrsm_specific(args);
+#else
+  clblasDtrsmFunctor * functor;
+  
+  functor = clblasDtrsmFunctorGpu::provide(args, "Tahiti");
+  if (functor) return functor;
+  
+  // else use the fallback implementation
+  return this->clblasFunctorSelector::select_dtrsm_specific(args);
+#endif
+
+}
+
+clblasSgemmFunctor * FunctorSelectorTahiti::select_sgemm_specific(clblasSgemmFunctor::Args & args)
+{
+#ifdef CLBLAS_TAHITI_DYNAMIC_KERNEL
+	return this->clblasFunctorSelector::select_sgemm_specific(args);
+#else
+  clblasSgemmFunctor * functor;
+  bool Not_TT = ((args.transA==clblasNoTrans && args.transB==clblasTrans ) || ( args.transA==clblasNoTrans && args.transB==clblasNoTrans ) || ( args.transA==clblasTrans && args.transB==clblasNoTrans ));
+  bool SmallMatrices = args.M/6*args.N/6<100*100 || ((args.M%64!=0 && args.N%64!=0 && args.M<1900 &&args.N<1900 ) && (args.M%96!=0 && args.N%96!=0 && args.M<1900 &&args.N<1900 ));
+  SmallMatrices= (SmallMatrices && (args.M%32==0&&args.N%32==0)) ;
+  SmallMatrices=SmallMatrices&&Not_TT&&args.K%16==0;
+
+
+  if (args.alpha!=0)
+  {
+    if (SmallMatrices)
+    {
+      functor = clBlasGCNSgemmSmallMatricesFunctor::provide(args,  "Tahiti");
+      if (functor) 
+        return functor;
+    }
+    
+
+    functor = clblasSgemmFunctorGCN::provide(args, "Tahiti");
+    if (functor) 
+      return functor;
+    
+  }
+  // else use the fallback implementation
+  return this->clblasFunctorSelector::select_sgemm_specific(args);
+#endif
+}
\ No newline at end of file
diff --git a/src/library/blas/generic/binary_lookup.cc b/src/library/blas/generic/binary_lookup.cc
new file mode 100644
index 0000000..90a6fa9
--- /dev/null
+++ b/src/library/blas/generic/binary_lookup.cc
@@ -0,0 +1,685 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#include <binary_lookup.h>
+
+#include <iostream>
+#include <fstream>
+#include <cassert>
+
+#include <stdio.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <devinfo.h>
+
+
+
+
+#ifdef _WIN32
+#include <windows.h>
+#include <direct.h> // for _mkdir
+#else
+#include <unistd.h>
+#endif
+
+extern "C"
+{
+#include <md5sum.h>
+}
+
+// size for clGetDeviceInfo queries
+#define SIZE 256
+
+
+
+
+#define CAPS_DEBUG 0
+
+#include <string.h>
+
+char * sep()
+{
+#ifdef __WIN32
+    return (char*)"\\";
+#else
+    return (char*)"/";
+#endif
+}
+
+static std::string cache_path;
+static bool cache_enabled(false);
+
+extern "C" void clblasInitBinaryCache()
+{
+    const char * path = getenv("CLBLAS_CACHE_PATH");
+    if (path)
+    {
+        cache_path = std::string(path) + sep();
+        cache_enabled = true;
+    }
+    else
+    {
+        cache_path = "";
+    }
+}
+
+BinaryLookup::CacheEntry::CacheEntry(const std::string & filename)
+    : m_filename(filename), m_successful_creation(false)
+{
+
+}
+
+void BinaryLookup::CacheEntry::close()
+{
+#ifdef _WIN32
+    CloseHandle(this->m_handle);
+#else
+    ::close(*(int*)this->m_handle);
+    delete (int*)this->m_handle;
+#endif
+}
+
+bool BinaryLookup::CacheEntry::successful_creation()
+{
+    return this->m_successful_creation;
+}
+
+bool BinaryLookup::CacheEntry::exclusive_create()
+{
+#ifdef _WIN32
+    HANDLE handle = CreateFile(this->m_filename.c_str(), 
+                               GENERIC_WRITE, 
+                               0, // no share with other process
+                               NULL,
+                               CREATE_NEW,
+                               FILE_ATTRIBUTE_NORMAL,
+                               NULL);
+
+    this->m_handle = handle;
+    this->m_successful_creation = (handle != INVALID_HANDLE_VALUE);
+    return this->m_successful_creation;
+#else
+    int * fd = new int[1];
+    *fd = open (this->m_filename.c_str(),
+                O_CREAT | O_EXCL, 
+                S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
+    this->m_handle = fd;
+    this->m_successful_creation = (*fd != -1);
+    return *fd >= 0;
+#endif
+}
+
+BinaryLookup::BinaryLookup(cl_context ctxt, cl_device_id device, const std::string & kernel_name)
+    : m_context(ctxt), m_device(device), m_program(NULL), m_binary(0), m_signature(0), m_cache_enabled(cache_enabled)
+{
+    // initialize the entry name
+    this->m_cache_entry_name = kernel_name;
+
+    if (this->m_cache_enabled)
+    {
+        // retrieve device information to compute the path of the cache
+        cl_int err = this->retrieveDeviceAndDriverInfo();
+
+        if (err != CL_SUCCESS)
+        {
+            cache_enabled = false;
+            this->m_cache_enabled = false;
+        }
+    }
+}
+
+BinaryLookup::~BinaryLookup()
+{
+    delete this->m_binary;
+    delete this->m_signature;
+}
+
+BinaryLookup::Variant::Variant()
+    : m_kind((VariantKind)0), m_size(0), m_data(0)
+{
+}
+
+BinaryLookup::Variant::Variant(VariantKind kind, char * data, size_t size)
+    : m_kind(kind), m_size(size)
+{
+    this->m_data = new char[this->m_size];
+    memcpy(this->m_data, data, size);
+}
+
+BinaryLookup::Variant::~Variant()
+{
+    // delete this->m_data;
+}
+
+void BinaryLookup::variantInt(int num)
+{
+    m_variants.push_back(Variant(INT, (char*)&num, sizeof(num)));
+}
+ 
+void BinaryLookup::variantDouble(double num)
+{
+    m_variants.push_back(Variant(DOUBLE, (char*)&num, sizeof(num)));
+}
+
+void BinaryLookup::variantCompileOptions(const std::string & opts)
+{
+    m_variants.push_back(Variant(STRING, (char*)opts.c_str(), opts.size()));
+}
+
+void BinaryLookup::variantRaw(const void * data, size_t bytes)
+{
+    m_variants.push_back(Variant(DATA, (char*)data, bytes));
+}
+
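+// The cache file starts with a 4-byte magic key: "CLB\0" when it was written
+// by a little-endian (LSB) host, "BLC\0" for a big-endian (MSB) host
+// (the MSB path is not generated yet, see the FIXME in populateCache()).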
+enum BinaryRepresentation
+{
+    LSB,
+    MSB,
+    UNKNOWN
+};
+
+enum BinaryRepresentation getStorageMode(char * data)
+{
+    if (data[0] == 'C' && 
+        data[1] == 'L' && 
+        data[2] == 'B' &&
+        data[3] == '\0')
+        return LSB;
+
+    if (data[0] == 'B' && 
+        data[1] == 'L' && 
+        data[2] == 'C' &&
+        data[3] == '\0')
+        return MSB;
+
+    return UNKNOWN;
+}
+
+void BinaryLookup::finalizeVariant()
+{
+    // serialize variants
+    size_t whole_variant_size_in_bytes = 0;
+
+    // store 1 byte for the variant kind
+    whole_variant_size_in_bytes += this->m_variants.size() * sizeof(int); // for the variant kind
+    whole_variant_size_in_bytes += this->m_variants.size() * sizeof(size_t); // for the variant size
+
+    // add every variant sizes
+    for(size_t i=0 ; i<this->m_variants.size() ; ++i)
+    {
+        const Variant & v = this->m_variants[i];
+
+        // compute the whole size of the signature
+        whole_variant_size_in_bytes += v.m_size;
+    }
+
+    this->m_header.signature_size = whole_variant_size_in_bytes;
+
+    this->m_signature = new char[whole_variant_size_in_bytes];
+    char * current_address = this->m_signature;
+    for(size_t i=0 ; i<this->m_variants.size() ; ++i)
+    {
+        Variant v = this->m_variants[i];
+
+        // write the variant kind
+        memcpy(current_address, &v.m_kind, sizeof(int));
+        current_address += sizeof(v.m_kind);
+
+        // write the variant size
+        memcpy(current_address, &v.m_size, sizeof(v.m_size));
+        current_address += sizeof(v.m_size);
+
+        // write the variant itself
+        memcpy(current_address, v.m_data, v.m_size);
+        current_address += v.m_size;
+    }
+
+    // Update the cache entry name if there are variants...
+    if (whole_variant_size_in_bytes != 0)
+    {
+        char * md5_sum = md5sum(this->m_signature, this->m_header.signature_size);
+        this->m_cache_entry_name = md5_sum;
+        delete md5_sum;
+    }
+    else
+    {
+        this->m_cache_entry_name += ".db";
+    }
+}
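+
+// Layout note: the signature serialized by finalizeVariant() is a flat
+// sequence of records, one per registered variant:
+//
+//     [ int kind ][ size_t size ][ size bytes of raw data ]   (repeated)
+//
+// When at least one variant was registered, the md5 checksum of this buffer
+// becomes the cache entry name; otherwise the plain kernel name with a ".db"
+// suffix is used.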
+
+bool BinaryLookup::loadHeader(std::ifstream &file, size_t length)
+{
+    file.read ((char*)&this->m_header, sizeof(Header));
+
+    // FIXME: Consider LSB Vs MSB number representation
+    assert(getStorageMode(this->m_header.magic_key) == LSB);
+
+    if (this->m_header.whole_file_size != (int)length)
+    {
+        // the file has not been correctly initialized (yet)
+        return false;
+    }
+
+    return true;
+}
+
+bool BinaryLookup::loadBinaryAndSignature(std::ifstream &file)
+{
+    {
+        this->m_binary    = new unsigned char [this->m_header.binary_size];
+        const std::istream& res = file.read((char*)this->m_binary, this->m_header.binary_size);
+        if (!res.good())
+            return false;
+    }
+
+    {
+        this->m_signature = new char [this->m_header.signature_size];
+        const std::istream& res = file.read((char*)this->m_signature, this->m_header.signature_size);
+
+        if (!res.good())
+            return false;
+
+        this->m_variants.clear();
+
+        char * current = this->m_signature;
+        for (int i=0 ; i<this->m_header.signature_size ; ++i)
+        {
+            Variant v;
+            v.m_kind = *(VariantKind*) current;
+            i += sizeof(int);
+            current += sizeof(int);
+
+            v.m_size = *(size_t*) current;
+            i += sizeof(size_t);
+            current += sizeof(size_t);
+
+            v.m_data = new char[v.m_size];
+            memcpy(v.m_data, current, v.m_size);
+            i += v.m_size;
+            current += v.m_size;
+
+            this->m_variants.push_back(v);
+        }
+    }
+
+    return true;
+}
+
+bool BinaryLookup::tryLoadCacheFile()
+{
+    // may create empty file or may wait until file is ready
+    const std::string & filename = this->m_path + this->m_cache_entry_name;
+    std::ifstream file (filename.c_str(), std::ios_base::binary);
+
+    if (file.is_open())
+    {
+        file.seekg (0, file.end);
+        size_t length = file.tellg();
+        file.seekg (0, file.beg);
+
+        if (length == 0)
+        {
+            // the file is corrupted, so return false
+            return false;
+        }
+
+        bool st;
+        st = loadHeader(file, length);
+
+        if (! st)
+            return false;
+
+        st = loadBinaryAndSignature(file);
+
+        if (! st)
+            return false;
+
+        file.close();
+        return true;
+    }
+    else
+    {
+        return false;
+    }
+}
+
+bool BinaryLookup::found()
+{
+    // if the cache is disabled (e.g. the cache directory could not be
+    // created), it is useless to look for an entry on disk
+    if (! this->m_cache_enabled)
+    {
+        return false; // not found
+    }
+
+    this->finalizeVariant(); // serialize the variants and compute a checksum on them
+    // this also determines the cache entry name (this->m_cache_entry_name) looked up under the cache path
+
+    if (tryLoadCacheFile())
+    {
+        cl_int err = buildFromBinary(this->m_binary,
+                                     this->m_header.binary_size, 
+                                     NULL);
+
+        // return false if buildFromBinary failed, true otherwise
+        return err==CL_SUCCESS;
+    }
+
+    return false;
+}
+
+static cl_int getSingleBinaryFromProgram(cl_program program,
+                                         std::vector<unsigned char*> & binary)
+{
+    // 3 - Determine the size of each program binary
+    size_t size;
+    cl_int err = clGetProgramInfo(program, CL_PROGRAM_BINARY_SIZES,
+                                  sizeof(size_t),
+                                  &size, NULL);
+    if (err != CL_SUCCESS)
+    {
+        std::cerr << "Error querying for program binary sizes" << std::endl;
+        return err;
+    }
+
+    binary.resize(size);
+    binary[0] = new unsigned char[size];
+
+    unsigned char * binary_address[1] = { binary[0] };
+
+    // 4 - Get all of the program binaries
+    err = clGetProgramInfo(program, CL_PROGRAM_BINARIES, 1 * sizeof(unsigned char*),
+                           binary_address, NULL);
+
+
+    if (err != CL_SUCCESS)
+    {
+#if CAPS_DEBUG
+        std::cerr << "Error querying for program binaries" << std::endl;
+#endif
+        return err;
+    }
+
+    return CL_SUCCESS;
+}
+
+cl_int BinaryLookup::writeCacheFile(std::vector<unsigned char*> &data)
+{
+    if (! this->m_cache_enabled)
+    {
+        return 0;
+    }
+
+    // exclusive open to ensure that only one thread will write the file
+    const std::string & filename = this->m_path + this->m_cache_entry_name;
+
+    CacheEntry cache_file(filename);
+    bool created = cache_file.exclusive_create();
+
+    // try to exclusively create the cache file on the disk
+    if (created)
+    {
+        // if it was created by the current thread, this one will write into cache file
+        cache_file.close();
+
+        const std::string & filename = this->m_path + this->m_cache_entry_name;
+        std::ofstream file (filename.c_str(), std::ios_base::binary);
+
+        file.write((char*)&this->m_header, sizeof(m_header));
+        file.write((char*)data[0], this->m_header.binary_size);
+        file.write((char*)this->m_signature, this->m_header.signature_size);
+        file.close();
+
+        return CL_SUCCESS;
+    }
+
+    // other threads do not write the cache file
+    // Ben: do we really need to output something here? All the branches return 0 (CL_SUCCESS)
+    return CL_SUCCESS;
+}
+
+cl_int BinaryLookup::populateCache()
+{
+    // FIXME: support MSB
+    this->m_header.magic_key[0] = 'C';
+    this->m_header.magic_key[1] = 'L';
+    this->m_header.magic_key[2] = 'B';
+    this->m_header.magic_key[3] = '\0';
+
+    std::vector<unsigned char*> data;
+    cl_int err = getSingleBinaryFromProgram(this->m_program, data);
+
+    if (err != CL_SUCCESS)
+    {
+        return err;
+    }
+
+    this->m_header.header_size = sizeof(Header);
+    this->m_header.binary_size = data.size();
+    this->m_header.whole_file_size = this->m_header.header_size + this->m_header.binary_size + this->m_header.signature_size;
+
+    err = writeCacheFile(data);
+
+    return CL_SUCCESS;
+}
+
+cl_int BinaryLookup::buildFromSource(const char * source)
+{
+    cl_int err;
+    this->m_program = BinaryLookup::buildProgramFromSource(source,
+                                                           this->m_context,
+                                                           this->m_device,
+                                                           err);
+
+    if (err != CL_SUCCESS)
+    {
+        fprintf(stderr, "Warning: clCreateProgramWithSource failed with code %d\n", err);
+        return err;
+    }
+
+    // write to the cache
+    this->populateCache();
+
+    return CL_SUCCESS;
+}
+
+cl_int BinaryLookup::buildFromLoadedBinary(const void * data,
+                                           size_t len,
+                                           const char * BuildOption)
+{
+    cl_int err;
+    this->m_program = BinaryLookup::buildProgramFromBinary((char*) data,
+                                                           len,
+                                                           this->m_context,
+                                                           this->m_device,
+                                                           err,
+                                                           BuildOption);
+
+    return err;
+}
+
+cl_int BinaryLookup::buildFromBinary(const void * data,
+                                     size_t len,
+                                     const char * BuildOption )
+{
+    cl_int err = buildFromLoadedBinary(data, len, BuildOption);
+    if (err != CL_SUCCESS)
+        return err;
+
+    // write to the cache
+    this->populateCache();
+
+    return CL_SUCCESS;
+}
+
+cl_program BinaryLookup::buildProgramFromSource(const char * source,
+                                                cl_context context,
+                                                cl_device_id device,
+                                                cl_int & err,
+                                                const char * options)
+{
+    cl_program program = clCreateProgramWithSource(context, 1, (const char **)&source, NULL, &err);
+
+    if (err != CL_SUCCESS)
+        return NULL;
+
+    err = clBuildProgram(program,
+                         1, /* FIXME: 1 device */
+                         &device,
+                         options, 
+                         NULL, 
+                         NULL);
+
+    if (err != CL_SUCCESS)
+        return NULL;
+
+    return program;
+}
+
+
+
+cl_program BinaryLookup::buildProgramFromBinary(const char * data,
+                                                size_t data_size,
+                                                cl_context context,
+                                                cl_device_id device,
+                                                cl_int & err,
+                                                const char * options)
+{
+    cl_program program = clCreateProgramWithBinary(context,
+                                                   1, // num_device
+                                                   &device, // device_list
+                                                   &data_size, // lengths
+                                                   (const unsigned char **)&data,
+                                                   NULL,
+                                                   &err);
+    if (err != CL_SUCCESS)
+    {
+        fprintf(stderr, "Warning: clCreateProgramWithBinary failed with code %d\n", err);
+        return NULL;
+    }
+
+    err = clBuildProgram(program,
+                         1, /* FIXME: 1 device */
+                         &device,
+                         options,
+                         NULL,
+                         NULL);
+
+    if (err != CL_SUCCESS)
+    {
+        return NULL;
+    }
+
+    return program;
+}
+
+cl_program BinaryLookup::getProgram()
+{
+    return this->m_program;
+}
+
+void BinaryLookup::setProgram(cl_program program)
+{
+    this->m_program = program;
+}
+
+
+static int make_directory(const std::string &path)
+{
+#ifdef _WIN32
+    return _mkdir (path.c_str());
+#else
+    return mkdir (path.c_str(), S_IRWXU);
+#endif
+}
+
+static void do_mkdir(const std::string &path)
+{
+    int st = make_directory (path.c_str());
+
+    if (st != 0)
+    {
+        if ( errno != EEXIST )
+        {
+            std::string tmp = "Cannot create directory '" + std::string(path) + "': ";
+            throw tmp;
+        }
+    }
+}
+
+cl_int BinaryLookup::retrieveDeviceAndDriverInfo()
+{
+    char m_device_vendor[SIZE];
+    char m_device_name[SIZE];
+    char m_driver_version[SIZE];
+
+    cl_int err = clGetDeviceInfo(this->m_device, CL_DEVICE_VENDOR, sizeof(m_device_vendor),
+                                 &m_device_vendor, NULL);
+    if (err != CL_SUCCESS)
+    {
+        return err;
+    }
+
+    err = clGetDeviceInfo(this->m_device, CL_DEVICE_NAME, sizeof(m_device_name),
+                          &m_device_name, NULL);
+    if (err != CL_SUCCESS)
+    {
+        return err;
+    }
+
+    err = clGetDeviceInfo(this->m_device, CL_DRIVER_VERSION, sizeof(m_driver_version),
+                          &m_driver_version, NULL);
+    if (err != CL_SUCCESS)
+    {
+        return err;
+    }
+
+#if CAPS_DEBUG
+    fprintf(stderr, "device vendor = %s\n", this->m_device_vendor);
+    fprintf(stderr, "device name = %s\n", this->m_device_name);
+    fprintf(stderr, "driver version = %s\n", this->m_driver_version);
+#endif
+
+    try
+    {
+        const std::string & root = (std::string(cache_path) + m_device_vendor + sep());
+        do_mkdir(root.c_str());
+
+        const std::string & root2 = (root + m_device_name + sep());
+        do_mkdir(root2.c_str());
+
+        const std::string & root3 = (root2 + m_driver_version + sep());
+        do_mkdir(root3.c_str());
+
+        const std::string & root4 = (root3 + this->m_cache_entry_name + sep());
+        do_mkdir(root4.c_str());
+
+        this->m_path = root4;
+        
+        return CL_SUCCESS;
+    }
+    catch (std::string & e)
+    {
+        fprintf(stderr, "%s\n", e.c_str());
+        cache_enabled = false;
+        this->m_cache_enabled = false;
+
+        return CL_INVALID_VALUE;
+    }
+}
diff --git a/src/library/blas/generic/common.c b/src/library/blas/generic/common.c
index 1464f79..236640d 100644
--- a/src/library/blas/generic/common.c
+++ b/src/library/blas/generic/common.c
@@ -416,13 +416,13 @@ Kernel
     cl_device_id device,
     cl_context context,
     SolverKgen kernelGenerator,
+    cl_program program,
     const SubproblemDim *dims,
     const PGranularity *pgran,
     const CLBLASKernExtra *extra,
     const char *buildOpts,
     cl_int *error)
 {
-
     cl_int err;
     char *source;
     ssize_t size;
@@ -435,6 +435,17 @@ Kernel
 	printf("PG : wgSize[0] : %d, wgSize[1] : %d, wfSize: %d\n",  pgran->wgSize[0], pgran->wgSize[1], pgran->wfSize);
 	#endif
 
+    kernel = allocKernel();
+
+    if (kernel == NULL) {
+        storeErrorCode(error, CL_OUT_OF_HOST_MEMORY);
+        return NULL;
+    }
+
+
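+    /* When no kernel generator is provided, the caller passes a pre-built
+     * cl_program (e.g. one loaded from the binary cache by makeKernelCached),
+     * so source generation and compilation are skipped. */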
+    if (kernelGenerator)
+    {
     size = kernelGenerator(NULL, 0, dims, pgran, (void*)extra);
     if (size < 0) {
         storeErrorCode(error, CL_OUT_OF_HOST_MEMORY);
@@ -451,12 +462,7 @@ Kernel
         return NULL;
     }
 
-	kernel = allocKernel();
-    if (kernel == NULL) {
-        free(source);
-        storeErrorCode(error, CL_OUT_OF_HOST_MEMORY);
-        return NULL;
-    }
+
 
     log = allocBuildLog();
 
@@ -502,6 +508,11 @@ Kernel
         storeErrorCode(error, err);
         return NULL;
     }
+    }
+    else 
+    {
+        kernel->program = program;
+    }
 
     kernel->extraSize = sizeof(CLBLASKernExtra);
     kernel->extra = calloc(1, kernel->extraSize);
diff --git a/src/library/blas/generic/common2.cc b/src/library/blas/generic/common2.cc
new file mode 100644
index 0000000..05bbdc6
--- /dev/null
+++ b/src/library/blas/generic/common2.cc
@@ -0,0 +1,98 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+/*
+ * Common C functions using C++ APIs
+ */
+
+#include <binary_lookup.h>
+
+#include <clblas-internal.h>
+
+#include <stdio.h>
+
+#define CAPS_DEBUG 0
+
+extern "C" Kernel* makeKernelCached(cl_device_id device,
+                                    cl_context context,
+                                    solver_id_t sid,
+                                    KernelKey * key,
+                                    SolverKgen kernelGenerator,
+                                    const SubproblemDim *dims,
+                                    const PGranularity *pgran,
+                                    const CLBLASKernExtra *extra,
+                                    const char *buildOpts,
+                                    cl_int *error)
+{
+    // For now, use the solver id to identify the kernel
+    // FIXME: it would be better to use the FunctionID, but there seems to be
+    // some confusion between BlasFunctionID and BlasRoutineID in the makeSolutionSeq() function...
+    char name[20];
+    sprintf(name, "solver%d", (int)sid);
+
+    BinaryLookup bl(context, device, name);
+
+    // For now, use the whole extra field as the signature identifying the kernel binary;
+    // this may be improved...
+    bl.variantInt(sid);
+    bl.variantInt(key->nrDims);
+    bl.variantRaw(key->subdims, sizeof(SubproblemDim) * key->nrDims);
+    bl.variantRaw(extra, sizeof(CLBLASKernExtra));
+
+    if (bl.found())
+    {
+#if CAPS_DEBUG
+        printf("Kernel loaded from cache\n");
+#endif
+        return makeKernel(device,
+                          context,
+                          0, // generator is not needed because the program
+                             // was loaded from the disk
+                          bl.getProgram(), // pass the program loaded from the
+                                           // disk
+                          dims,
+                          pgran,
+                          extra,
+                          buildOpts,
+                          error);
+
+    }
+    else
+    {
+#if CAPS_DEBUG
+        printf("Kernel generated from source\n");
+#endif
+        Kernel * kernel = makeKernel(device,
+                                     context,
+                                     kernelGenerator,
+                                     0, // cl_program = 0 because it was not loaded from the disk
+                                     dims,
+                                     pgran,
+                                     extra,
+                                     buildOpts,
+                                     error);
+
+        bl.setProgram(kernel->program);
+
+        bl.populateCache();
+
+        return kernel;
+    }
+}
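+
+// Illustrative sketch (assumptions, not code used by the library): a code
+// path that does not go through makeKernelCached() would drive BinaryLookup
+// directly in the same way:
+//
+//     BinaryLookup bl(context, device, "my_kernel");   // "my_kernel" is a made-up name
+//     bl.variantCompileOptions(buildOpts);
+//     bl.variantRaw(&params, sizeof(params));          // 'params' is a hypothetical struct
+//     if (!bl.found())                 // found() rebuilds from the cached binary on a hit
+//         bl.buildFromSource(source);  // on a miss, compile and populate the cache
+//     cl_program program = bl.getProgram();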
+
+
+
+
diff --git a/src/library/blas/generic/functor_cache.cc b/src/library/blas/generic/functor_cache.cc
new file mode 100644
index 0000000..c0307b5
--- /dev/null
+++ b/src/library/blas/generic/functor_cache.cc
@@ -0,0 +1,80 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include <stdio.h>
+#include <fstream>
+#include <iostream>
+#include <ios>
+
+#include <../functor/include/functor.h>
+#include <clblas-internal.h>
+
+#include <vector>
+#include <set>
+
+
+
+// ==================================================
+// == clblasFunctorCacheBase
+// ==================================================
+
+typedef std::set<clblasFunctorCacheBase*> clblasFunctorCacheSet ; 
+
+// Provide the set of all existing functor caches
+//
+// Remark: Since the set is typically populated by the constructors
+//         of global objects, we use the "construct on first use" 
+//         idiom, to avoid the infamous "static initialization order fiasco".
+//         See for example  http://www.parashift.com/c++-faq/static-init-order.html
+// 
+// Remark: The current implementation is not thread-safe, but that should
+//         be fine since the set is supposed to be populated at startup
+//         (assuming that all functor caches are global objects) and not
+//         modified concurrently afterwards.
+//
+static clblasFunctorCacheSet & getFunctorCacheSet() 
+{
+  static clblasFunctorCacheSet * all = new clblasFunctorCacheSet ; 
+  return * all ; 
+}
+
+//
+// This function is supposed to be called from clblasTearDown to empty all caches
+//
+extern "C" void cleanFunctorCaches(void) 
+{
+  // Ask each registered cache to clean itself. 
+  clblasFunctorCacheSet & all = getFunctorCacheSet() ; 
+  for (clblasFunctorCacheSet::iterator it= all.begin(); it!=all.end(); ++it)
+    {
+      clblasFunctorCacheBase * cache = *it ; 
+      cache->discardAll() ; 
+    }
+}
+
+clblasFunctorCacheBase::clblasFunctorCacheBase()
+{
+  //  if ( _cleanFunctorCachesHook == 0 ) 
+  //   _cleanFunctorCachesHook = cleanFunctorCaches ; // Install the hook to call cleanFunctorCaches
+
+  clblasFunctorCacheSet & all = getFunctorCacheSet() ; 
+  all.insert(this) ; 
+}  
+
+clblasFunctorCacheBase::~clblasFunctorCacheBase() 
+{
+  clblasFunctorCacheSet & all = getFunctorCacheSet() ; 
+  all.erase(this) ; 
+}
diff --git a/src/library/blas/generic/solution_seq_make.c b/src/library/blas/generic/solution_seq_make.c
index 8a5e402..b92e18e 100644
--- a/src/library/blas/generic/solution_seq_make.c
+++ b/src/library/blas/generic/solution_seq_make.c
@@ -584,10 +584,12 @@ makeSolutionSeq(
 					printf("Build options used : %s\n", bopts);
 					#endif
 
-                    kernel = makeKernel(key.device, key.context,
+                    kernel = makeKernelCached(key.device, key.context,
+                                              sid, &key,
                                         pattern->sops->genKernel,
                                         &dims[firstDimIdx], &step->pgran,
                                         &extra, bopts, &err);
+
                 }
 
                 if (kernel == NULL) {
diff --git a/src/library/blas/gens/blas_kgen.h b/src/library/blas/gens/blas_kgen.h
index 6fb1410..b48545e 100644
--- a/src/library/blas/gens/blas_kgen.h
+++ b/src/library/blas/gens/blas_kgen.h
@@ -52,7 +52,6 @@
 #include "tile.h"
 #include "fetch.h"
 
-
 #define BLAS_KGEN_FORMAT 1
 
 #define genInternalLoopEnd(ctx) kgenEndBranch(ctx, NULL)
@@ -905,6 +904,4 @@ checkGenRestoreTailCoords(
 UpdateResultFlags
 tailStatusToUpresFlags(TailStatus status);
 
-
-
 #endif /* BLAS_KGEN_H_ */
diff --git a/src/library/blas/gens/blas_subgroup.c b/src/library/blas/gens/blas_subgroup.c
index 9c87d53..59163ed 100644
--- a/src/library/blas/gens/blas_subgroup.c
+++ b/src/library/blas/gens/blas_subgroup.c
@@ -148,6 +148,8 @@ calcMergeStepSubgrN(
                                    vecLenC * 16 );//2x8-byte double
             break;
 
+        default:
+          break ;
     }
 
     if( 0==subgPerStep ){
@@ -263,6 +265,8 @@ static int declareSubgrLDS(
 
             break;
 
+        default:  // to avoid compilation warning
+            break;   
     }
 
     kgenAddStmt( pCtx, tmp );
@@ -525,4 +529,4 @@ subgGetDefaultDecomp(
     }
 
     return 0;
-}
\ No newline at end of file
+}
diff --git a/src/library/blas/gens/clTemplates/dgemm_NT_MN48.cl b/src/library/blas/gens/clTemplates/dgemm_NT_MN48.cl
new file mode 100644
index 0000000..f5703ce
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/dgemm_NT_MN48.cl
@@ -0,0 +1,347 @@
+#ifndef PURE_CL
+static const char *DGEMM_NT_MN48_KERNEL = "
+
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm(__global double * C,
+                    __global double2 const * restrict B,
+                    __global double2 const * restrict A,
+                    uint const N,
+                    uint const M,
+                    uint const K,
+                    double const alpha,
+                    double const beta,
+                    uint ldc,
+                    uint ldb,
+                    uint lda,
+                    uint offsetC,
+                    uint offsetB,
+                    uint offsetA)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    uint gidx = 24*get_group_id(0);
+    uint gidy = 24*get_group_id(1);
+    uint idx  = get_local_id(0);
+    uint idy  = get_local_id(1);
+    uint GX   = (gidx+idx) << 1;
+    uint GY   = (gidy+idy) << 1;
+    uint GYX  = (gidy+idx) << 1;
+
+    uA.d += GX  + idy*lda;
+    uB.d += GYX + idy*ldb;
+
+    __local double* plA = lA + idy*49 + 2*idx;
+    __local double* plB = lB + idy*49 + 2*idx;
+    uint ik = 0;
+    for(unsigned int block_k = 0; block_k < K; block_k+=8)
+    {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        vstore2(uA.d2v[0 ], 0, plA+0 );
+        vstore2(uA.d2v[8 ], 0, plA+16);
+        vstore2(uA.d2v[16], 0, plA+32);
+        vstore2(uB.d2v[0 ], 0, plB+0 );
+        vstore2(uB.d2v[8 ], 0, plB+16);
+        vstore2(uB.d2v[16], 0, plB+32);
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx << 1;
+        uint offB = idy << 1;
+        for(unsigned int k = 0 ; k < 8; k++)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 49;
+            offB += 49;
+            rC[0][0] = fma(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = fma(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = fma(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = fma(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = fma(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = fma(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = fma(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = fma(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = fma(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = fma(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = fma(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = fma(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = fma(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = fma(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = fma(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = fma(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = fma(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = fma(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = fma(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = fma(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = fma(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = fma(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = fma(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = fma(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = fma(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = fma(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = fma(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = fma(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = fma(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = fma(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = fma(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = fma(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = fma(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = fma(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = fma(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = fma(rA[5],rB[5],rC[5][5]);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    }
+    uint index;
+    GPtr uC;
+    double2 CC, RCv2;
+
+    for (int i=0; i < 3; i++)
+    {
+        int ic = i << 1;
+        index = (GX + i * 16) + (GY +  0) * ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[0][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[1][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += 15 * ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[2][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[3][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += 15 * ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[4][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[5][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+    }
+}
+
+";
+#else
+
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm(__global double * C,
+                    __global double2 const * restrict B,
+                    __global double2 const * restrict A,
+                    uint const N,
+                    uint const M,
+                    uint const K,
+                    double const alpha,
+                    double const beta,
+                    uint ldc,
+                    uint ldb,
+                    uint lda,
+                    uint offsetC,
+                    uint offsetB,
+                    uint offsetA)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    uint gidx = 24*get_group_id(0);
+    uint gidy = 24*get_group_id(1);
+    uint idx  = get_local_id(0);
+    uint idy  = get_local_id(1);
+    uint GX   = (gidx+idx) << 1;
+    uint GY   = (gidy+idy) << 1;
+    uint GYX  = (gidy+idx) << 1;
+
+    uA.d += GX  + idy*lda;
+    uB.d += GYX + idy*ldb;
+
+    __local double* plA = lA + idy*49 + 2*idx;
+    __local double* plB = lB + idy*49 + 2*idx;
+    uint ik = 0;
+    for(unsigned int block_k = 0; block_k < K; block_k+=8)
+    {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        vstore2(uA.d2v[0 ], 0, plA+0 );
+        vstore2(uA.d2v[8 ], 0, plA+16);
+        vstore2(uA.d2v[16], 0, plA+32);
+        vstore2(uB.d2v[0 ], 0, plB+0 );
+        vstore2(uB.d2v[8 ], 0, plB+16);
+        vstore2(uB.d2v[16], 0, plB+32);
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx << 1;
+        uint offB = idy << 1;
+        for(unsigned int k = 0 ; k < 8; k++)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 49;
+            offB += 49;
+            rC[0][0] = fma(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = fma(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = fma(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = fma(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = fma(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = fma(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = fma(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = fma(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = fma(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = fma(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = fma(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = fma(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = fma(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = fma(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = fma(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = fma(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = fma(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = fma(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = fma(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = fma(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = fma(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = fma(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = fma(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = fma(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = fma(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = fma(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = fma(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = fma(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = fma(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = fma(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = fma(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = fma(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = fma(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = fma(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = fma(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = fma(rA[5],rB[5],rC[5][5]);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    }
+    uint index;
+    GPtr uC;
+    double2 CC, RCv2;
+
+    for (int i=0; i < 3; i++)
+    {
+        int ic = i << 1;
+        index = (GX + i * 16) + (GY +  0) * ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[0][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[1][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += 15 * ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[2][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[3][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += 15 * ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[4][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+
+        index += ldc;
+        uC.d = (__global double *)&C[index];
+        vstore2(uC.d2v[0], (size_t) 0, (__private double*) &CC);
+        vstore2(((double2 *)&rC[5][ic])[0], (size_t) 0, (__private double*) &RCv2);
+        CC = fma((double2)(beta), CC, (double2)(alpha) * RCv2);
+        uC.d2v[0] = vload2((size_t) 0, (__private double*) &CC);
+    }
+}
+
+#endif
diff --git a/src/library/blas/gens/clTemplates/dgemm_gcn_SmallMatrices.cl b/src/library/blas/gens/clTemplates/dgemm_gcn_SmallMatrices.cl
new file mode 100644
index 0000000..de61415
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/dgemm_gcn_SmallMatrices.cl
@@ -0,0 +1,1159 @@
+
+// DGEMM kernels for Hawaii & Tahiti
+//
+// All kernels are ColumnMajor. The RowMajor case is handled by an earlier transformation
+// into an equivalent ColumnMajor problem (using the property that (A*B)' is equal to B'A'
+// and the fact that the conversion from Row to Column major is basically a transpose)
+//
+// The naming scheme for the kernels is dgemm_<ta><tb>_<divn>_<divm>_<divk>_<bs0>x<bs1>_<nv0>x<nv1> where 
+//    - <ta> is N or T representing the transpose operation on A
+//    - <tb> is N or T representing the transpose operation on B
+//    - <divn> is a required divisor of N (1 for any value)
+//    - <divm> is a required divisor of M (1 for any value)
+//    - <divk> is a required divisor of K (1 for any value)
+//    - <bs0>x<bs1> is the block size
+//    - <nv0>x<nv1> is the number of points computed per work-item
+//
+// For instance a kernel named 'dgemm_NT_16_32_1_8x8_2x4' 
+//    - would implement C = C + A * B' 
+//    - for N multiple of 16
+//    - for M multiple of 32 
+//    - for any value of K
+//    - using work-groups of size (8,8)
+//    - with each thread computing 2x4 points of C 
+//
+//
+// The kernel prototype shall be compatible with
+//
+//    __kernel void dgemm( __global double const * restrict A, 
+//                         __global double const * restrict B, 
+//                         __global double * C,
+//                         uint   M, 
+//                         uint   N, 
+//                         uint   K,
+//                         double alpha,  
+//                         double beta,   
+//                         uint   lda,
+//                         uint   ldb, 
+//                         uint   ldc, 
+//                         uint   offsetA, 
+//                         uint   offsetB , 
+//                         uint   offsetC 
+//                       )
+//
+//
+//
+//
+//
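+// Illustrative note (an assumption about how the host side drives these
+// kernels, not something defined in this file): with a work-group size of
+// <bs0>x<bs1> and <nv0>x<nv1> points of C per work-item, each work-group
+// covers a (<bs0>*<nv0>) x (<bs1>*<nv1>) tile of C, so the expected NDRange
+// is roughly global = (M/<nv0>, N/<nv1>) with local = (<bs0>, <bs1>).
+// For dgemm_NT_24_24_8_8x8_3x3 that means local (8,8) and global (M/3, N/3),
+// each group producing a 24x24 tile of C.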
+
+
+// =====  dgemm_NT_MN48.cl
+
+
+
+
+static const char * dgemm_NT_24_24_8_8x8_3x3__ALPHABETA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NT_24_24_8_8x8_3x3__ALPHABETA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha,  
+  double const beta,   
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[3][3]  = {(double)0};
+  double rA[1][3];
+  double rB[1][3];
+
+  double PreFetchA[3];
+  double PreFetchB[3];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[192];
+  __local double lB[192];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*24+ idxT + idyT*lda;
+  B +=  gidy*24+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*24+idxT;
+  __local double* plB = lB + idyT*24+idxT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  plA[16] = A[16];
+  
+  plB[0] = B[0];
+  plB[8] = B[8];
+  plB[16] = B[16];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8*ldb;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+    PreFetchA[2] = A[16];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8];
+    PreFetchB[2] = B[16];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      
+      offA += 24;
+      offB += 24;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+  
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+     
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*24;
+  C+= idx;
+  C+= gidy*24*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  
+
+}
+
+";
+
+
+
+static const char * dgemm_NT_16_16_8_8x8_2x2__ALPHABETA = "
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NT_16_16_8_8x8_2x2__ALPHABETA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha,  
+  double const beta,   
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[2][2]  = {(double)0};
+  double rA[1][2];
+  double rB[1][2];
+
+  double PreFetchA[2];
+  double PreFetchB[2];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[128];
+  __local double lB[128];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*16+ idxT + idyT*lda;
+  B +=  gidy*16+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*16+idxT;
+  __local double* plB = lB + idyT*16+idxT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  
+  plB[0] = B[0];
+  plB[8] = B[8];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8*ldb;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      
+      offA += 16;
+      offB += 16;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+       
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*16;
+  C+= idx;
+  C+= gidy*16*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc]; 
+
+}
+";
+
+
+
+static const char * dgemm_NT_24_24_8_8x8_3x3__ALPHA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NT_24_24_8_8x8_3x3__ALPHA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha, 
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[3][3]  = {(double)0};
+  double rA[1][3];
+  double rB[1][3];
+
+  double PreFetchA[3];
+  double PreFetchB[3];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[192];
+  __local double lB[192];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*24+ idxT + idyT*lda;
+  B +=  gidy*24+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*24+idxT;
+  __local double* plB = lB + idyT*24+idxT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  plA[16] = A[16];
+  
+  plB[0] = B[0];
+  plB[8] = B[8];
+  plB[16] = B[16];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8*ldb;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+    PreFetchA[2] = A[16];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8];
+    PreFetchB[2] = B[16];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      
+      offA += 24;
+      offB += 24;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+  
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+     
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*24;
+  C+= idx;
+  C+= gidy*24*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[8*ldc] = alpha*rC[0][1] ;
+  C[16*ldc] = alpha*rC[0][2];
+  							
+  C+=8;						
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[8*ldc] = alpha*rC[1][1] ;
+  C[16*ldc] = alpha*rC[1][2];
+  							
+  C+=8;						
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[8*ldc] = alpha*rC[2][1] ;
+  C[16*ldc] = alpha*rC[2][2];
+  
+
+}
+";
+
+
+static const char * dgemm_NT_16_16_8_8x8_2x2__ALPHA = "
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NT_16_16_8_8x8_2x2__ALPHA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha, 
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[2][2]  = {(double)0};
+  double rA[1][2];
+  double rB[1][2];
+
+  double PreFetchA[2];
+  double PreFetchB[2];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[128];
+  __local double lB[128];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*16+ idxT + idyT*lda;
+  B +=  gidy*16+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*16+idxT;
+  __local double* plB = lB + idyT*16+idxT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  
+  plB[0] = B[0];
+  plB[8] = B[8];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8*ldb;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      
+      offA += 16;
+      offB += 16;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+       
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*16;
+  C+= idx;
+  C+= gidy*16*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0];
+  C[8*ldc] = alpha*rC[0][1];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0];
+  C[8*ldc] = alpha*rC[1][1]; 
+
+}
+";
+
+
+
+
+
+
+
+
+
+
+
+
+static const char * dgemm_NN_24_24_8_8x8_3x3__ALPHABETA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NN_24_24_8_8x8_3x3__ALPHABETA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha,  
+  double const beta,   
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[3][3]  = {(double)0};
+  double rA[1][3];
+  double rB[1][3];
+
+  double PreFetchA[3];
+  double PreFetchB[3];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[192];
+  __local double lB[192];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*24+ idxT + idyT*lda;
+  B +=  gidy*24*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*24+idxT;
+  __local double* plB = lB + idxT*24+idyT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  plA[16] = A[16];
+  
+  plB[0] = B[0];
+  plB[8] = B[8*ldb];
+  plB[16] = B[16*ldb];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+    PreFetchA[2] = A[16];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8*ldb];
+    PreFetchB[2] = B[16*ldb];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      
+      offA += 24;
+      offB += 24;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+  
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+     
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*24;
+  C+= idx;
+  C+= gidy*24*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  
+
+}
+
+";
+
+
+static const char * dgemm_NN_16_16_8_8x8_2x2__ALPHABETA = "
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NN_16_16_8_8x8_2x2__ALPHABETA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha,  
+  double const beta,   
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[2][2]  = {(double)0};
+  double rA[1][2];
+  double rB[1][2];
+
+  double PreFetchA[2];
+  double PreFetchB[2];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[128];
+  __local double lB[128];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*16+ idxT + idyT*lda;
+  B +=  gidy*16*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*16+idxT;
+  __local double* plB = lB + idxT*16+idyT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  
+  plB[0] = B[0];
+  plB[8] = B[8*ldb];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8*ldb];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      
+      offA += 16;
+      offB += 16;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+       
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*16;
+  C+= idx;
+  C+= gidy*16*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc]; 
+
+}
+";
+
+
+
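+// The __ALPHA variants below match the __ALPHABETA kernels above, except that the epilogue
+// stores C = alpha*(A*B) directly instead of reading C back and applying a beta term.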
+static const char * dgemm_NN_24_24_8_8x8_3x3__ALPHA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NN_24_24_8_8x8_3x3__ALPHA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha, 
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[3][3]  = {(double)0};
+  double rA[1][3];
+  double rB[1][3];
+
+  double PreFetchA[3];
+  double PreFetchB[3];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[192];
+  __local double lB[192];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*24+ idxT + idyT*lda;
+  B +=  gidy*24*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*24+idxT;
+  __local double* plB = lB + idxT*24+idyT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  plA[16] = A[16];
+  
+  plB[0] = B[0];
+  plB[8] = B[8*ldb];
+  plB[16] = B[16*ldb];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+    PreFetchA[2] = A[16];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8*ldb];
+    PreFetchB[2] = B[16*ldb];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      
+      offA += 24;
+      offB += 24;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+  
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+     
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*24;
+  C+= idx;
+  C+= gidy*24*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[8*ldc] = alpha*rC[0][1] ;
+  C[16*ldc] = alpha*rC[0][2];
+  							
+  C+=8;						
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[8*ldc] = alpha*rC[1][1] ;
+  C[16*ldc] = alpha*rC[1][2];
+  							
+  C+=8;						
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[8*ldc] = alpha*rC[2][1] ;
+  C[16*ldc] = alpha*rC[2][2];
+  
+
+}
+
+";
+
+static const char * dgemm_NN_16_16_8_8x8_2x2__ALPHA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NN_16_16_8_8x8_2x2__ALPHA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha, 
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[2][2]  = {(double)0};
+  double rA[1][2];
+  double rB[1][2];
+
+  double PreFetchA[2];
+  double PreFetchB[2];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[128];
+  __local double lB[128];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*16+ idxT + idyT*lda;
+  B +=  gidy*16*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*16+idxT;
+  __local double* plB = lB + idxT*16+idyT;
+
+  plA[0] = A[0];
+  plA[8] = A[8];
+  
+  plB[0] = B[0];
+  plB[8] = B[8*ldb];
+  
+ 
+	
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+   
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8*ldb];
+    
+    int offA = idx;
+    int offB = idy;
+
+
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+     
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      
+      offA += 16;
+      offB += 16;
+			
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+  
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+       
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]   = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    
+    plB[0]   = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*16;
+  C+= idx;
+  C+= gidy*16*ldc;
+  C+= idy*ldc;
+	
+  C[0*ldc] = alpha*rC[0][0];
+  C[8*ldc] = alpha*rC[0][1];
+  
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0];
+  C[8*ldc] = alpha*rC[1][1]; 
+
+}
+";
+
diff --git a/src/library/blas/gens/clTemplates/dgemm_hawai.cl b/src/library/blas/gens/clTemplates/dgemm_hawai.cl
new file mode 100644
index 0000000..9fc7832
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/dgemm_hawai.cl
@@ -0,0 +1,6371 @@
+
+// DGEMM kernels for Hawaii & Tahiti
+//
+// All kernels are ColumnMajor. The RowMajor case is handled by an earlier transformation
+// into an equivalent ColumnMajor problem, using the property that (A*B)' equals B'A' and
+// the fact that converting between row-major and column-major is basically a transpose.
+//
+// The naming scheme for the kernels is dgemm_<ta><tb>_<divn>_<divm>_<divk>_<bs0>x<bs1>_<nv0>x<nv1> where 
+//    - <ta> is N or T representing the transpose operation on A
+//    - <tb> is N or T representing the transpose operation on B
+//    - <divn> is a required divisor of N (1 for any value)
+//    - <divm> is a required divisor of M (1 for any value)
+//    - <divk> is a required divisor of K (1 for any value)
+//    - <bs0>x<bs1> is the block size
+//    - <nv0>x<nv1> is the number of points computed per work-item
+//
+// For instance, a kernel named 'dgemm_NT_16_32_1_8x8_2x4'
+//    - would compute C = alpha * A * B' + beta * C
+//    - for N multiple of 16
+//    - for M multiple of 32
+//    - for any value of K
+//    - using work-groups of size (8,8)
+//    - with each work-item computing 2x4 points of C
+//
+// The kernel prototype shall be compatible with
+//
+//    __kernel void dgemm( __global double const * restrict A, 
+//                         __global double const * restrict B, 
+//                         __global double * C,
+//                         uint   M, 
+//                         uint   N, 
+//                         uint   K,
+//                         double alpha,  
+//                         double beta,   
+//                         uint   lda,
+//                         uint   ldb, 
+//                         uint   ldc, 
+//                         uint   offsetA, 
+//                         uint   offsetB , 
+//                         uint   offsetC 
+//                       )
+//
+//
+//
+//
+//
+
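+// As an illustration only (not part of this file), a host-side launch could derive its
+// NDRange from the kernel name: with <bs0>x<bs1> work-groups and <nv0>x<nv1> points per
+// work-item, one work-group covers a (<bs0>*<nv0>) x (<bs1>*<nv1>) tile of C. A minimal,
+// hypothetical sketch for 'dgemm_NT_48_48_8_8x8_6x6' (48x48 tiles), assuming 'queue' and
+// 'kernel' are already set up and M, N are multiples of 48:
+//
+//     size_t local[2]  = { 8, 8 };                       /* <bs0> x <bs1>            */
+//     size_t global[2] = { (M / 48) * 8, (N / 48) * 8 }; /* one group per 48x48 tile */
+//     clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global, local, 0, NULL, NULL);
+//
+// The functor implementations that wrap these kernels handle the real launch logic.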
+
+// =====  dgemm_NT_MN48.cl
+static const char * dgemm_NT_48_48_8_8x8_6x6__ALPHABETA = "
+
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
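+
+// The GPtr union lets the same pointer be addressed either as scalar doubles (for the
+// offset arithmetic and the compute loop) or as double2 vectors (for the vstore2-based
+// copies into local memory).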
+
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_48_8_8x8_6x6__ALPHABETA(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    uint block_k = K >> 3;
+    do {
+        __local double* plA = lA + idy*48 + 2*idx;
+        __local double* plB = lB + idy*48 + 2*idx;
+        barrier(CLK_LOCAL_MEM_FENCE);
+        vstore2( uA.d2v[0 ], 0, plA+0  );
+        vstore2( uA.d2v[8 ], 0, plA+16 );
+        vstore2( uA.d2v[16], 0, plA+32 );
+        vstore2( uB.d2v[0 ], 0, plB+0  );
+        vstore2( uB.d2v[8 ], 0, plB+16 );
+        vstore2( uB.d2v[16], 0, plB+32 );
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+       
+	    #pragma unroll 1
+        for(uint k = 0 ; k < 8; k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+    (C[(offset_x +  0) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y +  0) * ldc], alpha * rC[0][0]));
+    (C[(offset_x +  1) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y +  0) * ldc], alpha * rC[0][1]));
+    (C[(offset_x +  0) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y +  1) * ldc], alpha * rC[1][0]));
+    (C[(offset_x +  1) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y +  1) * ldc], alpha * rC[1][1]));
+    (C[(offset_x +  0) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 16) * ldc], alpha * rC[2][0]));
+    (C[(offset_x +  1) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 16) * ldc], alpha * rC[2][1]));
+    (C[(offset_x +  0) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 17) * ldc], alpha * rC[3][0]));
+    (C[(offset_x +  1) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 17) * ldc], alpha * rC[3][1]));
+    (C[(offset_x +  0) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 32) * ldc], alpha * rC[4][0]));
+    (C[(offset_x +  1) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 32) * ldc], alpha * rC[4][1]));
+    (C[(offset_x +  0) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 33) * ldc], alpha * rC[5][0]));
+    (C[(offset_x +  1) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 33) * ldc], alpha * rC[5][1]));
+    (C[(offset_x + 16) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y +  0) * ldc], alpha * rC[0][2]));
+    (C[(offset_x + 17) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y +  0) * ldc], alpha * rC[0][3]));
+    (C[(offset_x + 16) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y +  1) * ldc], alpha * rC[1][2]));
+    (C[(offset_x + 17) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y +  1) * ldc], alpha * rC[1][3]));
+    (C[(offset_x + 16) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 16) * ldc], alpha * rC[2][2]));
+    (C[(offset_x + 17) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 16) * ldc], alpha * rC[2][3]));
+    (C[(offset_x + 16) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 17) * ldc], alpha * rC[3][2]));
+    (C[(offset_x + 17) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 17) * ldc], alpha * rC[3][3]));
+    (C[(offset_x + 16) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 32) * ldc], alpha * rC[4][2]));
+    (C[(offset_x + 17) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 32) * ldc], alpha * rC[4][3]));
+    (C[(offset_x + 16) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 33) * ldc], alpha * rC[5][2]));
+    (C[(offset_x + 17) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 33) * ldc], alpha * rC[5][3]));
+    (C[(offset_x + 32) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y +  0) * ldc], alpha * rC[0][4]));
+    (C[(offset_x + 33) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y +  0) * ldc], alpha * rC[0][5]));
+    (C[(offset_x + 32) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y +  1) * ldc], alpha * rC[1][4]));
+    (C[(offset_x + 33) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y +  1) * ldc], alpha * rC[1][5]));
+    (C[(offset_x + 32) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 16) * ldc], alpha * rC[2][4]));
+    (C[(offset_x + 33) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 16) * ldc], alpha * rC[2][5]));
+    (C[(offset_x + 32) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 17) * ldc], alpha * rC[3][4]));
+    (C[(offset_x + 33) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 17) * ldc], alpha * rC[3][5]));
+    (C[(offset_x + 32) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 32) * ldc], alpha * rC[4][4]));
+    (C[(offset_x + 33) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 32) * ldc], alpha * rC[4][5]));
+    (C[(offset_x + 32) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 33) * ldc], alpha * rC[5][4]));
+    (C[(offset_x + 33) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 33) * ldc], alpha * rC[5][5]));
+}
+
+
+";
+
+static const char * dgemm_NT_48_48_8_8x8_6x6__ALPHA = "
+
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_48_8_8x8_6x6__ALPHA(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    uint block_k = K >> 3;
+    do {
+        __local double* plA = lA + idy*48 + 2*idx;
+        __local double* plB = lB + idy*48 + 2*idx;
+        barrier(CLK_LOCAL_MEM_FENCE);
+        vstore2( uA.d2v[0 ], 0, plA+0  );
+        vstore2( uA.d2v[8 ], 0, plA+16 );
+        vstore2( uA.d2v[16], 0, plA+32 );
+        vstore2( uB.d2v[0 ], 0, plB+0  );
+        vstore2( uB.d2v[8 ], 0, plB+16 );
+        vstore2( uB.d2v[16], 0, plB+32 );
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+       
+	    #pragma unroll 1
+        for(uint k = 0 ; k < 8; k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+    C[(offset_x +  0) + (offset_y +  0) * ldc] = alpha * rC[0][0];
+    C[(offset_x +  1) + (offset_y +  0) * ldc] = alpha * rC[0][1];
+    C[(offset_x +  0) + (offset_y +  1) * ldc] = alpha * rC[1][0];
+    C[(offset_x +  1) + (offset_y +  1) * ldc] = alpha * rC[1][1];
+    C[(offset_x +  0) + (offset_y + 16) * ldc] = alpha * rC[2][0];
+    C[(offset_x +  1) + (offset_y + 16) * ldc] = alpha * rC[2][1];
+    C[(offset_x +  0) + (offset_y + 17) * ldc] = alpha * rC[3][0];
+    C[(offset_x +  1) + (offset_y + 17) * ldc] = alpha * rC[3][1];
+    C[(offset_x +  0) + (offset_y + 32) * ldc] = alpha * rC[4][0];
+    C[(offset_x +  1) + (offset_y + 32) * ldc] = alpha * rC[4][1];
+    C[(offset_x +  0) + (offset_y + 33) * ldc] = alpha * rC[5][0];
+    C[(offset_x +  1) + (offset_y + 33) * ldc] = alpha * rC[5][1];
+    C[(offset_x + 16) + (offset_y +  0) * ldc] = alpha * rC[0][2];
+    C[(offset_x + 17) + (offset_y +  0) * ldc] = alpha * rC[0][3];
+    C[(offset_x + 16) + (offset_y +  1) * ldc] = alpha * rC[1][2];
+    C[(offset_x + 17) + (offset_y +  1) * ldc] = alpha * rC[1][3];
+    C[(offset_x + 16) + (offset_y + 16) * ldc] = alpha * rC[2][2];
+    C[(offset_x + 17) + (offset_y + 16) * ldc] = alpha * rC[2][3];
+    C[(offset_x + 16) + (offset_y + 17) * ldc] = alpha * rC[3][2];
+    C[(offset_x + 17) + (offset_y + 17) * ldc] = alpha * rC[3][3];
+    C[(offset_x + 16) + (offset_y + 32) * ldc] = alpha * rC[4][2];
+    C[(offset_x + 17) + (offset_y + 32) * ldc] = alpha * rC[4][3];
+    C[(offset_x + 16) + (offset_y + 33) * ldc] = alpha * rC[5][2];
+    C[(offset_x + 17) + (offset_y + 33) * ldc] = alpha * rC[5][3];
+    C[(offset_x + 32) + (offset_y +  0) * ldc] = alpha * rC[0][4];
+    C[(offset_x + 33) + (offset_y +  0) * ldc] = alpha * rC[0][5];
+    C[(offset_x + 32) + (offset_y +  1) * ldc] = alpha * rC[1][4];
+    C[(offset_x + 33) + (offset_y +  1) * ldc] = alpha * rC[1][5];
+    C[(offset_x + 32) + (offset_y + 16) * ldc] = alpha * rC[2][4];
+    C[(offset_x + 33) + (offset_y + 16) * ldc] = alpha * rC[2][5];
+    C[(offset_x + 32) + (offset_y + 17) * ldc] = alpha * rC[3][4];
+    C[(offset_x + 33) + (offset_y + 17) * ldc] = alpha * rC[3][5];
+    C[(offset_x + 32) + (offset_y + 32) * ldc] = alpha * rC[4][4];
+    C[(offset_x + 33) + (offset_y + 32) * ldc] = alpha * rC[4][5];
+    C[(offset_x + 32) + (offset_y + 33) * ldc] = alpha * rC[5][4];
+    C[(offset_x + 33) + (offset_y + 33) * ldc] = alpha * rC[5][5];
+}
+
+
+";
+
+static const char * dgemm_NT_32_32_8_8x8_4x4__ALPHABETA = "
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_32_32_8_8x8_4x4__ALPHABETA( __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha,  
+                                       double const beta,   
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA,
+                                       uint const offsetB,
+                                       uint const offsetC)
+{
+    double rC[4][4]  = {(double)0};
+    double rA[1][4];
+    double rB[1][4];
+
+	double PreFetchA[4];
+	double PreFetchB[4];
+
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[264];
+    __local double lB[264];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  gidy*32+ idxT + idyT*ldb;
+
+	__local double* plA = lA + idyT*33+idxT;
+    __local double* plB = lB + idyT*33+idxT;
+
+	plA[0] = A[0];
+    plA[16] = A[16];
+    plA[132] = A[4*lda];
+    plA[148] = A[16+4*lda];
+    plB[0] = B[0];
+    plB[16] = B[16];
+    plB[132] = B[4*ldb];
+    plB[148] = B[16+4*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+	
+    
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+        //barrier(CLK_LOCAL_MEM_FENCE);
+        A += 8*lda;
+        B += 8*ldb;
+       
+	   PreFetchA[0] = A[0];
+	   PreFetchA[1] = A[16];
+	   PreFetchA[2] = A[4*lda];
+	   PreFetchA[3] = A[16+4*lda];
+	   PreFetchB[0] = B[0];
+	   PreFetchB[1] = B[16];
+	   PreFetchB[2] = B[4*ldb];
+	   PreFetchB[3] = B[16+4*ldb];
+
+
+	    int offA = idx;
+        int offB = idy;
+
+
+      
+        for( int k = 0 ; k < 8; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+        }
+
+		barrier(CLK_LOCAL_MEM_FENCE);
+		plA[0]   = PreFetchA[0];
+        plA[16]  = PreFetchA[1];
+        plA[132] = PreFetchA[2];
+        plA[148] = PreFetchA[3];
+        plB[0]   = PreFetchB[0];
+        plB[16]  = PreFetchB[1];
+        plB[132] = PreFetchB[2];
+        plB[148] = PreFetchB[3];
+		barrier(CLK_LOCAL_MEM_FENCE);
+
+      //  A += 8*lda;
+      //  B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+    C+=8;
+}
+";
+
+static const char * dgemm_NT_32_32_8_8x8_4x4__ALPHA = "
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_32_32_8_8x8_4x4__ALPHA( __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha,    
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA,
+                                       uint const offsetB,
+                                       uint const offsetC)
+{
+    double rC[4][4]  = {(double)0};
+    double rA[1][4];
+    double rB[1][4];
+
+	double PreFetchA[4];
+	double PreFetchB[4];
+
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[264];
+    __local double lB[264];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  gidy*32+ idxT + idyT*ldb;
+
+	__local double* plA = lA + idyT*33+idxT;
+    __local double* plB = lB + idyT*33+idxT;
+
+	plA[0] = A[0];
+    plA[16] = A[16];
+    plA[132] = A[4*lda];
+    plA[148] = A[16+4*lda];
+    plB[0] = B[0];
+    plB[16] = B[16];
+    plB[132] = B[4*ldb];
+    plB[148] = B[16+4*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+	
+    
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+        //barrier(CLK_LOCAL_MEM_FENCE);
+        A += 8*lda;
+        B += 8*ldb;
+       
+	   PreFetchA[0] = A[0];
+	   PreFetchA[1] = A[16];
+	   PreFetchA[2] = A[4*lda];
+	   PreFetchA[3] = A[16+4*lda];
+	   PreFetchB[0] = B[0];
+	   PreFetchB[1] = B[16];
+	   PreFetchB[2] = B[4*ldb];
+	   PreFetchB[3] = B[16+4*ldb];
+
+
+	    int offA = idx;
+        int offB = idy;
+
+
+      
+        for( int k = 0 ; k < 8; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+        }
+
+		barrier(CLK_LOCAL_MEM_FENCE);
+		plA[0]   = PreFetchA[0];
+        plA[16]  = PreFetchA[1];
+        plA[132] = PreFetchA[2];
+        plA[148] = PreFetchA[3];
+        plB[0]   = PreFetchB[0];
+        plB[16]  = PreFetchB[1];
+        plB[132] = PreFetchB[2];
+        plB[148] = PreFetchB[3];
+		barrier(CLK_LOCAL_MEM_FENCE);
+
+      //  A += 8*lda;
+      //  B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    C[0*ldc] = alpha*rC[0][0] ;
+    C[8*ldc] = alpha*rC[0][1] ;
+    C[16*ldc] = alpha*rC[0][2];
+    C[24*ldc] = alpha*rC[0][3];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[8*ldc] = alpha*rC[1][1] ;
+    C[16*ldc] = alpha*rC[1][2];
+    C[24*ldc] = alpha*rC[1][3];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[8*ldc] = alpha*rC[2][1] ;
+    C[16*ldc] = alpha*rC[2][2];
+    C[24*ldc] = alpha*rC[2][3];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[8*ldc] = alpha*rC[3][1] ;
+    C[16*ldc] = alpha*rC[3][2];
+    C[24*ldc] = alpha*rC[3][3];
+    C+=8;
+}
+";
+
+
+
+static const char * dgemm_NT_40_40_8_8x8_5x5__ALPHABETA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_40_40_8_8x8_5x5__ALPHABETA( __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha,  
+                                       double const beta,   
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA,
+                                       uint const offsetB,
+                                       uint const offsetC)
+{
+  double rC[5][5]  = {(double)0};
+  double rA[1][5];
+  double rB[1][5];
+
+  double PreFetchA[5];
+  double PreFetchB[5];
+
+  //double PreFetchA_5;
+  //double PreFetchB_5;
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[320];
+  __local double lB[320];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+  
+
+  A +=  gidx*40+ idxT + idyT*lda;
+  B +=  gidy*40+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*40+idxT;
+  __local double* plB = lB + idyT*40+idxT;
+
+  
+
+    plA[0]  = A[0];
+    plA[8]  = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plA[32] = A[32]; 
+    plB[0]  = B[0];
+    plB[8]  = B[8];
+    plB[16] = B[16];
+    plB[24] = B[24];
+    plB[32] = B[32];
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+/*    barrier(CLK_LOCAL_MEM_FENCE);
+
+
+    plA[0] = A[0];
+    plA[8] = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plA[32] = A[32]; 
+    plB[0] = B[0];
+    plB[8] = B[8];
+    plB[16] = B[16];
+    plB[24] = B[24];
+    plB[32] = B[32];
+    barrier(CLK_LOCAL_MEM_FENCE);
+*/
+A += 8*lda;
+B += 8*ldb;
+    PreFetchA[0] = A[0];
+	PreFetchA[1] = A[8];
+	PreFetchA[2] = A[16];
+	PreFetchA[3] = A[24];
+	PreFetchA[4] = A[32];
+	PreFetchB[0] = B[0];
+	PreFetchB[1] = B[8];
+	PreFetchB[2] = B[16];
+	PreFetchB[3] = B[24];
+	PreFetchB[4] = B[32];
+
+
+
+    int offA = idx;
+    int offB = idy;
+   // int off256 = 256;
+
+
+#pragma unroll 1
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rA[0][4] = lA[offA + 32];
+
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      rB[0][4] = lB[offB + 32];
+
+
+      offA += 40;
+      offB += 40;
+      //off256 -= 24;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+
+      rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+      rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+      rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+      rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+      rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+
+    }
+
+	barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]  = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    plA[24] = PreFetchA[3];
+    plA[32] = PreFetchA[4];
+    plB[0]  = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    plB[24] = PreFetchB[3];
+    plB[32] = PreFetchB[4];
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*40;
+  C+= idx;
+  C+= gidy*40*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[0][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[1][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[2][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[3][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[4][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[4][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[4][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[4][4] + beta*C[32*ldc];
+   
+}
+";
+
+
+static const char * dgemm_NT_40_40_8_8x8_5x5__ALPHA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_40_40_8_8x8_5x5__ALPHA( __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA,
+                                       uint const offsetB,
+                                       uint const offsetC)
+{
+  double rC[5][5]  = {(double)0};
+  double rA[1][5];
+  double rB[1][5];
+
+  double PreFetchA[5];
+  double PreFetchB[5];
+
+  //double PreFetchA_5;
+  //double PreFetchB_5;
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[320];
+  __local double lB[320];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+  
+
+  A +=  gidx*40+ idxT + idyT*lda;
+  B +=  gidy*40+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*40+idxT;
+  __local double* plB = lB + idyT*40+idxT;
+
+  
+
+    plA[0]  = A[0];
+    plA[8]  = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plA[32] = A[32]; 
+    plB[0]  = B[0];
+    plB[8]  = B[8];
+    plB[16] = B[16];
+    plB[24] = B[24];
+    plB[32] = B[32];
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+/*    barrier(CLK_LOCAL_MEM_FENCE);
+
+
+    plA[0] = A[0];
+    plA[8] = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plA[32] = A[32]; 
+    plB[0] = B[0];
+    plB[8] = B[8];
+    plB[16] = B[16];
+    plB[24] = B[24];
+    plB[32] = B[32];
+    barrier(CLK_LOCAL_MEM_FENCE);
+*/
+A += 8*lda;
+B += 8*ldb;
+    PreFetchA[0] = A[0];
+	PreFetchA[1] = A[8];
+	PreFetchA[2] = A[16];
+	PreFetchA[3] = A[24];
+	PreFetchA[4] = A[32];
+	PreFetchB[0] = B[0];
+	PreFetchB[1] = B[8];
+	PreFetchB[2] = B[16];
+	PreFetchB[3] = B[24];
+	PreFetchB[4] = B[32];
+
+
+
+    int offA = idx;
+    int offB = idy;
+   // int off256 = 256;
+
+
+#pragma unroll 1
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rA[0][4] = lA[offA + 32];
+
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      rB[0][4] = lB[offB + 32];
+
+
+      offA += 40;
+      offB += 40;
+      //off256 -= 24;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+
+      rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+      rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+      rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+      rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+      rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+
+    }
+
+	barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]  = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    plA[24] = PreFetchA[3];
+    plA[32] = PreFetchA[4];
+    plB[0]  = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    plB[24] = PreFetchB[3];
+    plB[32] = PreFetchB[4];
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    //  A += 8*lda;
+    //  B += 8*ldb;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*40;
+  C+= idx;
+  C+= gidy*40*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[8*ldc] = alpha*rC[0][1] ;
+  C[16*ldc] = alpha*rC[0][2];
+  C[24*ldc] = alpha*rC[0][3];
+  C[32*ldc] = alpha*rC[0][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[8*ldc] = alpha*rC[1][1] ;
+  C[16*ldc] = alpha*rC[1][2];
+  C[24*ldc] = alpha*rC[1][3];
+  C[32*ldc] = alpha*rC[1][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[8*ldc] = alpha*rC[2][1] ;
+  C[16*ldc] = alpha*rC[2][2];
+  C[24*ldc] = alpha*rC[2][3];
+  C[32*ldc] = alpha*rC[2][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[3][0] ;
+  C[8*ldc] = alpha*rC[3][1] ;
+  C[16*ldc] = alpha*rC[3][2];
+  C[24*ldc] = alpha*rC[3][3];
+  C[32*ldc] = alpha*rC[3][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[4][0] ;
+  C[8*ldc] = alpha*rC[4][1] ;
+  C[16*ldc] = alpha*rC[4][2];
+  C[24*ldc] = alpha*rC[4][3];
+  C[32*ldc] = alpha*rC[4][4];
+   
+}
+";
+
+// ============= genericDgemm.cl
+// (was DgemmGenericMNK): handles arbitrary M, N, K
+// local work-group size 8x8
+// padding 32
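+// This generic variant (divisors 1_1_1, i.e. no alignment requirement on M, N or K) uses a
+// fast path for interior work-groups; the last work-group in either dimension zero-pads
+// out-of-range loads of A and B and bounds-checks its stores to C.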
+static const char * dgemm_NT_1_1_1_8x8_4x4__ALPHABETA = "
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_1_8x8_4x4__ALPHABETA(  __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha,  
+                                       double const beta,   
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA,
+                                       uint const offsetB,
+                                       uint const offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 16;
+  int idyT = idt / 16;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+1*idxT;
+  __local double* plB = lB + idyT*33+1*idxT;
+
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+
+    int CurrentOffSetA = gidx*32+ idxT;
+    int CurrentOffSetB = gidy*32+ idxT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0]   = CurrentOffSetA>=M?0.0:A[0];
+      plA[16]  = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[132] = CurrentOffSetA>=M?0.0:A[4*lda];
+      plA[148] = CurrentOffSetA+16>=M?0.0:A[16+4*lda];
+     
+      plB[0]   = CurrentOffSetB>=N?0.0:B[0];
+      plB[16]  = CurrentOffSetB+16>=N?0.0:B[16];
+      plB[132] = CurrentOffSetB>=N?0.0:B[4*ldb];
+      plB[148] = CurrentOffSetB+16>=N?0.0:B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+      return;
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0*ldc] = alpha*rC[i][0] + beta*C[0*ldc];
+      
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1] + beta*C[8*ldc];
+      
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2] + beta*C[16*ldc];
+      
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3] + beta*C[24*ldc];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+        return;
+      
+
+    }
+  }
+  else
+  {
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0];
+      plA[16] = A[16];
+      plA[132] = A[4*lda];
+      plA[148] = A[16+4*lda];
+      plB[0] = B[0];
+      plB[16] = B[16];
+      plB[132] = B[4*ldb];
+      plB[148] = B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+
+      int offA = idx;
+      int offB = idy;
+     
+
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+ 
+  }
+  
+}
+
+"; 
+
+
+
+static const char * dgemm_NT_1_1_1_8x8_4x4__ALPHA = "
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_1_8x8_4x4__ALPHA(  __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha, 
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA,
+                                       uint const offsetB,
+                                       uint const offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 16;
+  int idyT = idt / 16;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+1*idxT;
+  __local double* plB = lB + idyT*33+1*idxT;
+
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+
+    int CurrentOffSetA = gidx*32+ idxT;
+    int CurrentOffSetB = gidy*32+ idxT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0]   = CurrentOffSetA>=M?0.0:A[0];
+      plA[16]  = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[132] = CurrentOffSetA>=M?0.0:A[4*lda];
+      plA[148] = CurrentOffSetA+16>=M?0.0:A[16+4*lda];
+     
+      plB[0]   = CurrentOffSetB>=N?0.0:B[0];
+      plB[16]  = CurrentOffSetB+16>=N?0.0:B[16];
+      plB[132] = CurrentOffSetB>=N?0.0:B[4*ldb];
+      plB[148] = CurrentOffSetB+16>=N?0.0:B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+      return;
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0*ldc] = alpha*rC[i][0];
+      
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1];
+      
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2];
+      
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+        return;
+      
+
+    }
+  }
+  else
+  {
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0];
+      plA[16] = A[16];
+      plA[132] = A[4*lda];
+      plA[148] = A[16+4*lda];
+      plB[0] = B[0];
+      plB[16] = B[16];
+      plB[132] = B[4*ldb];
+      plB[148] = B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+
+      int offA = idx;
+      int offB = idy;
+     
+
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+
+    C[0*ldc]  = alpha*rC[0][0];
+    C[8*ldc]  = alpha*rC[0][1];
+    C[16*ldc] = alpha*rC[0][2];
+    C[24*ldc] = alpha*rC[0][3];
+    C+=8;                     
+    C[0*ldc]  = alpha*rC[1][0];
+    C[8*ldc]  = alpha*rC[1][1];
+    C[16*ldc] = alpha*rC[1][2];
+    C[24*ldc] = alpha*rC[1][3];
+    C+=8;                     
+    C[0*ldc]  = alpha*rC[2][0];
+    C[8*ldc]  = alpha*rC[2][1];
+    C[16*ldc] = alpha*rC[2][2];
+    C[24*ldc] = alpha*rC[2][3];
+    C+=8;                     
+    C[0*ldc]  = alpha*rC[3][0];
+    C[8*ldc]  = alpha*rC[3][1];
+    C[16*ldc] = alpha*rC[3][2];
+    C[24*ldc] = alpha*rC[3][3];
+ 
+  }
+  
+}
+
+"; 
+// was DgemmGenericMN
+// M and N are arbitrary (border tiles are guarded); K must be a multiple of 8
+static const char * dgemm_NT_1_1_8_8x8_4x4__ALPHABETA = "
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_8_8x8_4x4__ALPHABETA(  __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha,  
+                                       double const beta,  
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA, 
+                                       uint const offsetB, 
+                                       uint const offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 16;
+  int idyT = idt / 16;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+1*idxT;
+  __local double* plB = lB + idyT*33+1*idxT;
+
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+
+    int CurrentOffSetA = gidx*32+ idxT;
+    int CurrentOffSetB = gidy*32+ idxT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0]   = CurrentOffSetA>=M?0.0:A[0];
+      plA[16]  = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[132] = CurrentOffSetA>=M?0.0:A[4*lda];
+      plA[148] = CurrentOffSetA+16>=M?0.0:A[16+4*lda];
+     
+      plB[0]   = CurrentOffSetB>=N?0.0:B[0];
+      plB[16]  = CurrentOffSetB+16>=N?0.0:B[16];
+      plB[132] = CurrentOffSetB>=N?0.0:B[4*ldb];
+      plB[148] = CurrentOffSetB+16>=N?0.0:B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = 1*idx;
+      int offB = 1*idy;
+
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+     {
+      return;
+    }
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0*ldc] = alpha*rC[i][0] + beta*C[0*ldc];
+      
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1] + beta*C[8*ldc];
+      
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2] + beta*C[16*ldc];
+      
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3] + beta*C[24*ldc];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+      {
+        return;
+      }
+
+    }
+  }
+  else
+  {
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0];
+      plA[16] = A[16];
+      plA[132] = A[4*lda];
+      plA[148] = A[16+4*lda];
+      plB[0] = B[0];
+      plB[16] = B[16];
+      plB[132] = B[4*ldb];
+      plB[148] = B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+
+      int offA = idx;
+      int offB = idy;
+
+
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx*1;
+    C+= gidy*32*ldc;
+    C+= idy*1*ldc;
+
+
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+ 
+  }
+  
+}
+
+"; 
+
+
+static const char * dgemm_NT_1_1_8_8x8_4x4__ALPHA = "
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_8_8x8_4x4__ALPHA(  __global double const * restrict A, 
+                                       __global double const * restrict B,
+                                       __global double * C,  
+                                       uint const M, 
+                                       uint const N, 
+                                       uint const K,
+                                       double const alpha, 
+                                       uint lda,
+                                       uint ldb, 
+                                       uint ldc, 
+                                       uint const offsetA, 
+                                       uint const offsetB, 
+                                       uint const offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 16;
+  int idyT = idt / 16;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+1*idxT;
+  __local double* plB = lB + idyT*33+1*idxT;
+
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+
+    int CurrentOffSetA = gidx*32+ idxT;
+    int CurrentOffSetB = gidy*32+ idxT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0]   = CurrentOffSetA>=M?0.0:A[0];
+      plA[16]  = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[132] = CurrentOffSetA>=M?0.0:A[4*lda];
+      plA[148] = CurrentOffSetA+16>=M?0.0:A[16+4*lda];
+     
+      plB[0]   = CurrentOffSetB>=N?0.0:B[0];
+      plB[16]  = CurrentOffSetB+16>=N?0.0:B[16];
+      plB[132] = CurrentOffSetB>=N?0.0:B[4*ldb];
+      plB[148] = CurrentOffSetB+16>=N?0.0:B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = 1*idx;
+      int offB = 1*idy;
+
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+     {
+      return;
+    }
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0*ldc] = alpha*rC[i][0];
+      
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1];
+      
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2];
+      
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+      {
+        return;
+      }
+
+    }
+  }
+  else
+  {
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0];
+      plA[16] = A[16];
+      plA[132] = A[4*lda];
+      plA[148] = A[16+4*lda];
+      plB[0] = B[0];
+      plB[16] = B[16];
+      plB[132] = B[4*ldb];
+      plB[148] = B[16+4*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+
+      int offA = idx;
+      int offB = idy;
+
+
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8*ldb;
+    }
+
+    C+= gidx*32;
+    C+= idx*1;
+    C+= gidy*32*ldc;
+    C+= idy*1*ldc;
+
+
+    C[0*ldc] = alpha*rC[0][0] ;
+    C[8*ldc] = alpha*rC[0][1] ;
+    C[16*ldc] = alpha*rC[0][2];
+    C[24*ldc] = alpha*rC[0][3];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[8*ldc] = alpha*rC[1][1] ;
+    C[16*ldc] = alpha*rC[1][2];
+    C[24*ldc] = alpha*rC[1][3];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[8*ldc] = alpha*rC[2][1] ;
+    C[16*ldc] = alpha*rC[2][2];
+    C[24*ldc] = alpha*rC[2][3];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[8*ldc] = alpha*rC[3][1] ;
+    C[16*ldc] = alpha*rC[3][2];
+    C[24*ldc] = alpha*rC[3][3];
+ 
+  }
+  
+}
+
+"; 
+
+
+//
+// was DgemmGenericK: M and N must be multiples of 32 (no border guards); K is arbitrary
+//
+static const char * dgemm_NT_32_32_1_8x8_4x4__ALPHABETA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_32_32_1_8x8_4x4__ALPHABETA(  __global double const * restrict A, 
+                                         __global double const * restrict B,
+                                         __global double * C,  
+                                         uint const M, 
+                                         uint const N, 
+                                         uint const K,
+                                         double const alpha,  
+                                         double const beta,   
+                                         uint lda,
+                                         uint ldb, 
+                                         uint ldc, 
+                                         uint const offsetA, 
+                                         uint const offsetB, 
+                                         uint const offsetC)
+{
+    double rC[4][4]  = {(double)0};
+    double rA[1][4];
+    double rB[1][4];
+	
+    double PreFetchA[4];
+    double PreFetchB[4];
+
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[264];
+    __local double lB[264];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  gidy*32+ idxT + idyT*ldb;
+
+    __local double* plA = lA + idyT*33+1*idxT;
+    __local double* plB = lB + idyT*33+1*idxT;
+
+    plA[0]   = A[0];
+    plA[16]  = A[16];
+    plA[132] = A[4*lda];
+    plA[148] = A[16+4*lda];
+    plB[0]   = B[0];
+    plB[16]  = B[16];
+    plB[132] = B[4*ldb];
+    plB[148] = B[16+4*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+    
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        A += 8*lda;
+        B += 8*ldb;
+
+		PreFetchA[0] = A[0];
+		PreFetchA[1] = A[16];
+		PreFetchA[2] = A[4*lda];
+		PreFetchA[3] = A[16+4*lda];
+		PreFetchB[0] = B[0];
+		PreFetchB[1] = B[16];
+		PreFetchB[2] = B[4*ldb];
+		PreFetchB[3] = B[16+4*ldb];
+
+
+
+        int offA = idx;
+        int offB = idy;
+
+
+      
+        for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+        }
+        
+		barrier(CLK_LOCAL_MEM_FENCE);
+		plA[0]   = PreFetchA[0];
+		plA[16]  = PreFetchA[1];
+		plA[132] = PreFetchA[2];
+		plA[148] = PreFetchA[3];
+		plB[0]   = PreFetchB[0];
+		plB[16]  = PreFetchB[1];
+		plB[132] = PreFetchB[2];
+		plB[148] = PreFetchB[3];
+		barrier(CLK_LOCAL_MEM_FENCE);
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+    C+=8;
+}
+
+";
+
+static const char * dgemm_NT_32_32_1_8x8_4x4__ALPHA = "
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_32_32_1_8x8_4x4__ALPHA(  __global double const * restrict A, 
+                                         __global double const * restrict B,
+                                         __global double * C,  
+                                         uint const M, 
+                                         uint const N, 
+                                         uint const K,
+                                         double const alpha,  
+                                         uint lda,
+                                         uint ldb, 
+                                         uint ldc, 
+                                         uint const offsetA, 
+                                         uint const offsetB, 
+                                         uint const offsetC)
+{
+    double rC[4][4]  = {(double)0};
+    double rA[1][4];
+    double rB[1][4];
+	
+    double PreFetchA[4];
+    double PreFetchB[4];
+
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[264];
+    __local double lB[264];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  gidy*32+ idxT + idyT*ldb;
+
+    __local double* plA = lA + idyT*33+1*idxT;
+    __local double* plB = lB + idyT*33+1*idxT;
+
+    plA[0]   = A[0];
+    plA[16]  = A[16];
+    plA[132] = A[4*lda];
+    plA[148] = A[16+4*lda];
+    plB[0]   = B[0];
+    plB[16]  = B[16];
+    plB[132] = B[4*ldb];
+    plB[148] = B[16+4*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+    
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        A += 8*lda;
+        B += 8*ldb;
+
+		PreFetchA[0] = A[0];
+		PreFetchA[1] = A[16];
+		PreFetchA[2] = A[4*lda];
+		PreFetchA[3] = A[16+4*lda];
+		PreFetchB[0] = B[0];
+		PreFetchB[1] = B[16];
+		PreFetchB[2] = B[4*ldb];
+		PreFetchB[3] = B[16+4*ldb];
+
+
+
+        int offA = idx;
+        int offB = idy;
+
+
+      
+        for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+        }
+        
+		barrier(CLK_LOCAL_MEM_FENCE);
+		plA[0]   = PreFetchA[0];
+		plA[16]  = PreFetchA[1];
+		plA[132] = PreFetchA[2];
+		plA[148] = PreFetchA[3];
+		plB[0]   = PreFetchB[0];
+		plB[16]  = PreFetchB[1];
+		plB[132] = PreFetchB[2];
+		plB[148] = PreFetchB[3];
+		barrier(CLK_LOCAL_MEM_FENCE);
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    C[0*ldc] = alpha*rC[0][0] ;
+    C[8*ldc] = alpha*rC[0][1] ;
+    C[16*ldc] = alpha*rC[0][2];
+    C[24*ldc] = alpha*rC[0][3];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[8*ldc] = alpha*rC[1][1] ;
+    C[16*ldc] = alpha*rC[1][2];
+    C[24*ldc] = alpha*rC[1][3];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[8*ldc] = alpha*rC[2][1] ;
+    C[16*ldc] = alpha*rC[2][2];
+    C[24*ldc] = alpha*rC[2][3];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[8*ldc] = alpha*rC[3][1] ;
+    C[16*ldc] = alpha*rC[3][2];
+    C[24*ldc] = alpha*rC[3][3];
+    C+=8;
+}
+
+";
+
+// ============ TNDgemmColumn.cl 
+
+static const char * dgemm_TN_32_32_16_8x16_4x2__ALPHABETA = "
+
+
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_32_32_16_8x16_4x2__ALPHABETA( __global double const * restrict A, 
+                                          __global double const * restrict B, 
+                                          __global double * C,
+                                          uint const M, 
+                                          uint const N, 
+                                          uint const K,
+                                          double const alpha,  
+                                          double const beta,   
+                                          uint lda,
+                                          uint ldb, 
+                                          uint ldc, 
+                                          uint const offsetA, 
+                                          uint const offsetB , 
+                                          uint const offsetC 
+                                          )
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+    
+    for( int block_k=0 ; block_k< K ; block_k+=16)
+    {
+        __local double* plA = lA + idxT*33+ idyT;
+        __local double* plB = lB + idxT*33+ idyT;
+      
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0+0*lda];
+        plA[8] = A[0+8*lda];
+        plA[16] = A[0+16*lda];
+        plA[24] = A[0+24*lda];
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+       
+        for( int k = 0 ; k < 16; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 16];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    
+}
+"; 
+
+
+static const char * dgemm_TN_32_32_16_8x16_4x2__ALPHA = "
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_32_32_16_8x16_4x2__ALPHA( __global double const * restrict A, 
+                                          __global double const * restrict B, 
+                                          __global double * C,
+                                          uint const M, 
+                                          uint const N, 
+                                          uint const K,
+                                          double const alpha,  
+                                          uint lda,
+                                          uint ldb, 
+                                          uint ldc, 
+                                          uint const offsetA, 
+                                          uint const offsetB , 
+                                          uint const offsetC 
+                                          )
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+    
+    for( int block_k=0 ; block_k< K ; block_k+=16)
+    {
+        __local double* plA = lA + idxT*33+ idyT;
+        __local double* plB = lB + idxT*33+ idyT;
+      
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0+0*lda];
+        plA[8] = A[0+8*lda];
+        plA[16] = A[0+16*lda];
+        plA[24] = A[0+24*lda];
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+       
+        for( int k = 0 ; k < 16; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 16];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[16*ldc] = alpha*rC[2][1];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[16*ldc] = alpha*rC[3][1];
+}
+"; 
+
+static const char * dgemm_TN_48_48_8_8x8_6x6__ALPHABETA = "
+
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+__kernel void dgemm_TN_48_48_8_8x8_6x6__ALPHABETA(  __global double const * restrict A,
+                                         __global double const * restrict B,
+                                         __global double * C,  
+                                         uint const M, 
+                                         uint const N, 
+                                         uint const K,
+                                         double const alpha,  
+                                         double const beta,   
+                                         uint lda,
+                                         uint ldb, 
+                                         uint ldc, 
+                                         uint const offsetA, 
+                                         uint const offsetB,
+                                         uint const offsetC
+                                         )
+{
+    double rC[6][6]  = {(double)0};
+    double rA[1][6];
+    double rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[392];
+    __local double lB[392];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 4;
+    int idyT = idt / 4;
+    
+    A +=  gidx*48*lda + idxT + idyT*lda;
+    B +=  gidy*48*ldb+ idxT + idyT*ldb;
+    
+    //for( int block_k=0 ; block_k< K ; block_k+=8)
+    uint block_k = K >> 3;
+    do
+	{
+        __local double* plA = lA + idxT*49+ idyT;
+        __local double* plB = lB + idxT*49+ idyT;
+   
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0];
+        plA[196] = A[4];
+        plA[16] = A[16*lda];
+        plA[212] = A[4+16*lda];
+        plA[32] = A[32*lda];
+        plA[228] = A[4+32*lda];
+        plB[0] = B[0];
+        plB[196] = B[4+0*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[212] = B[4+16*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[228] = B[4+32*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+       
+        for( int k = 0 ; k < 8; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rA[0][4] = lA[offA + 32];
+            rA[0][5] = lA[offA + 40];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            rB[0][4] = lB[offB + 32];
+            rB[0][5] = lB[offB + 40];
+            offA += 49;
+            offB += 49;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]);
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]);
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]);
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]);
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]);
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]);
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]);
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]);
+        }
+        A += 8;
+        B += 8;
+    }
+	while (--block_k > 0);
+
+    C+= gidx*48;
+    C+= idx;
+    C+= gidy*48*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[0][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[0][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[1][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[1][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[2][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[2][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[3][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[3][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[4][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[4][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[4][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[4][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[4][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[5][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[5][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[5][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[5][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[5][5] + beta*C[40*ldc];
+    C+=8;
+}
+"; 
+
+static const char * dgemm_TN_48_48_8_8x8_6x6__ALPHA = "
+
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+__kernel void dgemm_TN_48_48_8_8x8_6x6__ALPHA(  __global double const * restrict A,
+                                         __global double const * restrict B,
+                                         __global double * C,  
+                                         uint const M, 
+                                         uint const N, 
+                                         uint const K,
+                                         double const alpha,  
+                                         uint lda,
+                                         uint ldb, 
+                                         uint ldc, 
+                                         uint const offsetA, 
+                                         uint const offsetB,
+                                         uint const offsetC
+                                         )
+{
+    double rC[6][6]  = {(double)0};
+    double rA[1][6];
+    double rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[392];
+    __local double lB[392];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 4;
+    int idyT = idt / 4;
+    
+    A +=  gidx*48*lda + idxT + idyT*lda;
+    B +=  gidy*48*ldb+ idxT + idyT*ldb;
+    
+    //for( int block_k=0 ; block_k< K ; block_k+=8)
+    uint block_k = K >> 3;
+    do
+	{
+        __local double* plA = lA + idxT*49+ idyT;
+        __local double* plB = lB + idxT*49+ idyT;
+   
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0];
+        plA[196] = A[4];
+        plA[16] = A[16*lda];
+        plA[212] = A[4+16*lda];
+        plA[32] = A[32*lda];
+        plA[228] = A[4+32*lda];
+        plB[0] = B[0];
+        plB[196] = B[4+0*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[212] = B[4+16*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[228] = B[4+32*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+       
+        for( int k = 0 ; k < 8; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rA[0][4] = lA[offA + 32];
+            rA[0][5] = lA[offA + 40];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            rB[0][4] = lB[offB + 32];
+            rB[0][5] = lB[offB + 40];
+            offA += 49;
+            offB += 49;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]);
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]);
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]);
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]);
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]);
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]);
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]);
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]);
+        }
+        A += 8;
+        B += 8;
+    }
+	while (--block_k > 0);
+
+    C+= gidx*48;
+    C+= idx;
+    C+= gidy*48*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0] ;
+    C[8*ldc] = alpha*rC[0][1] ;
+    C[16*ldc] = alpha*rC[0][2];
+    C[24*ldc] = alpha*rC[0][3];
+    C[32*ldc] = alpha*rC[0][4];
+    C[40*ldc] = alpha*rC[0][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[8*ldc] = alpha*rC[1][1] ;
+    C[16*ldc] = alpha*rC[1][2];
+    C[24*ldc] = alpha*rC[1][3];
+    C[32*ldc] = alpha*rC[1][4];
+    C[40*ldc] = alpha*rC[1][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[8*ldc] = alpha*rC[2][1] ;
+    C[16*ldc] = alpha*rC[2][2];
+    C[24*ldc] = alpha*rC[2][3];
+    C[32*ldc] = alpha*rC[2][4];
+    C[40*ldc] = alpha*rC[2][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[8*ldc] = alpha*rC[3][1] ;
+    C[16*ldc] = alpha*rC[3][2];
+    C[24*ldc] = alpha*rC[3][3];
+    C[32*ldc] = alpha*rC[3][4];
+    C[40*ldc] = alpha*rC[3][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[4][0] ;
+    C[8*ldc] = alpha*rC[4][1] ;
+    C[16*ldc] = alpha*rC[4][2];
+    C[24*ldc] = alpha*rC[4][3];
+    C[32*ldc] = alpha*rC[4][4];
+    C[40*ldc] = alpha*rC[4][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[5][0] ;
+    C[8*ldc] = alpha*rC[5][1] ;
+    C[16*ldc] = alpha*rC[5][2];
+    C[24*ldc] = alpha*rC[5][3];
+    C[32*ldc] = alpha*rC[5][4];
+    C[40*ldc] = alpha*rC[5][5];
+    
+}
+"; 
+
+
+static const char * dgemm_TN_48_48_16_8x8_6x6__ALPHABETA = " 
+
+
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+__kernel void dgemm_TN_48_48_16_8x8_6x6__ALPHABETA(  __global double const * restrict A, 
+                                          __global double const * restrict B,
+                                          __global double * C,  
+                                          uint const M, 
+                                          uint const N, 
+                                          uint const K,
+                                          double const alpha,  
+                                          double const beta,  
+                                          uint lda,
+                                          uint ldb, 
+                                          uint ldc, 
+                                          uint const offsetA, 
+                                          uint const offsetB, 
+                                          uint const offsetC)
+{
+    double rC[6][6]  = {(double)0};
+    double rA[1][6];
+    double rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[784];
+    __local double lB[784];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 8;
+    int idyT = idt / 8;
+    
+    A +=  gidx*48*lda + idxT + idyT*lda;
+    B +=  gidy*48*ldb+ idxT + idyT*ldb;
+    
+    //for( int block_k=0 ; block_k< K ; block_k+=16)
+	uint block_k = K >> 4;
+    do
+    {
+        __local double* plA = lA + idxT*49+ idyT;
+        __local double* plB = lB + idxT*49+ idyT;
+      
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0+0*lda];
+        plA[392] = A[8+0*lda];
+        plA[8] = A[0+8*lda];
+        plA[400] = A[8+8*lda];
+        plA[16] = A[0+16*lda];
+        plA[408] = A[8+16*lda];
+        plA[24] = A[0+24*lda];
+        plA[416] = A[8+24*lda];
+        plA[32] = A[0+32*lda];
+        plA[424] = A[8+32*lda];
+        plA[40] = A[0+40*lda];
+        plA[432] = A[8+40*lda];
+        plB[0] = B[0+0*ldb];
+        plB[392] = B[8+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[400] = B[8+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[408] = B[8+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[416] = B[8+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[424] = B[8+32*ldb];
+        plB[40] = B[0+40*ldb];
+        plB[432] = B[8+40*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+       
+        for( int k = 0 ; k < 16; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rA[0][4] = lA[offA + 32];
+            rA[0][5] = lA[offA + 40];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            rB[0][4] = lB[offB + 32];
+            rB[0][5] = lB[offB + 40];
+            offA += 49;
+            offB += 49;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]);
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]);
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]);
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]);
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]);
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]);
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]);
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]);
+        }
+        A += 16;
+        B += 16;
+    }
+	while (--block_k > 0);
+
+    C+= gidx*48;
+    C+= idx*1;
+    C+= gidy*48*ldc;
+    C+= idy*1*ldc;
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[0][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[0][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[1][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[1][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[2][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[2][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[3][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[3][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[4][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[4][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[4][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[4][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[4][5] + beta*C[40*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[8*ldc] = alpha*rC[5][1] + beta*C[8*ldc];
+    C[16*ldc] = alpha*rC[5][2] + beta*C[16*ldc];
+    C[24*ldc] = alpha*rC[5][3] + beta*C[24*ldc];
+    C[32*ldc] = alpha*rC[5][4] + beta*C[32*ldc];
+    C[40*ldc] = alpha*rC[5][5] + beta*C[40*ldc];
+    C+=8;
+}
+";
+
+
+static const char * dgemm_TN_48_48_16_8x8_6x6__ALPHA = " 
+
+
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+__kernel void dgemm_TN_48_48_16_8x8_6x6__ALPHA(  __global double const * restrict A, 
+                                          __global double const * restrict B,
+                                          __global double * C,  
+                                          uint const M, 
+                                          uint const N, 
+                                          uint const K,
+                                          double const alpha, 
+                                          uint lda,
+                                          uint ldb, 
+                                          uint ldc, 
+                                          uint const offsetA, 
+                                          uint const offsetB, 
+                                          uint const offsetC)
+{
+    double rC[6][6]  = {(double)0};
+    double rA[1][6];
+    double rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[784];
+    __local double lB[784];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 8;
+    int idyT = idt / 8;
+    
+    A +=  gidx*48*lda + idxT + idyT*lda;
+    B +=  gidy*48*ldb+ idxT + idyT*ldb;
+    
+    //for( int block_k=0 ; block_k< K ; block_k+=16)
+	uint block_k = K >> 4;
+    do
+    {
+        __local double* plA = lA + idxT*49+ idyT;
+        __local double* plB = lB + idxT*49+ idyT;
+      
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0+0*lda];
+        plA[392] = A[8+0*lda];
+        plA[8] = A[0+8*lda];
+        plA[400] = A[8+8*lda];
+        plA[16] = A[0+16*lda];
+        plA[408] = A[8+16*lda];
+        plA[24] = A[0+24*lda];
+        plA[416] = A[8+24*lda];
+        plA[32] = A[0+32*lda];
+        plA[424] = A[8+32*lda];
+        plA[40] = A[0+40*lda];
+        plA[432] = A[8+40*lda];
+        plB[0] = B[0+0*ldb];
+        plB[392] = B[8+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[400] = B[8+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[408] = B[8+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[416] = B[8+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[424] = B[8+32*ldb];
+        plB[40] = B[0+40*ldb];
+        plB[432] = B[8+40*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+       
+        for( int k = 0 ; k < 16; k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rA[0][4] = lA[offA + 32];
+            rA[0][5] = lA[offA + 40];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 8];
+            rB[0][2] = lB[offB + 16];
+            rB[0][3] = lB[offB + 24];
+            rB[0][4] = lB[offB + 32];
+            rB[0][5] = lB[offB + 40];
+            offA += 49;
+            offB += 49;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]);
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]);
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]);
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]);
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]);
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]);
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]);
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]);
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]);
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]);
+        }
+        A += 16;
+        B += 16;
+    }
+    while (--block_k > 0);
+
+    C+= gidx*48;
+    C+= idx;
+    C+= gidy*48*ldc;
+    C+= idy*ldc;
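+    // No beta term in this variant: C is simply overwritten with
+    // alpha * A^T * B, so its previous contents are never read.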
+
+    C[0*ldc] = alpha*rC[0][0] ;
+    C[8*ldc] = alpha*rC[0][1] ;
+    C[16*ldc] = alpha*rC[0][2];
+    C[24*ldc] = alpha*rC[0][3];
+    C[32*ldc] = alpha*rC[0][4];
+    C[40*ldc] = alpha*rC[0][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[8*ldc] = alpha*rC[1][1] ;
+    C[16*ldc] = alpha*rC[1][2];
+    C[24*ldc] = alpha*rC[1][3];
+    C[32*ldc] = alpha*rC[1][4];
+    C[40*ldc] = alpha*rC[1][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[8*ldc] = alpha*rC[2][1] ;
+    C[16*ldc] = alpha*rC[2][2];
+    C[24*ldc] = alpha*rC[2][3];
+    C[32*ldc] = alpha*rC[2][4];
+    C[40*ldc] = alpha*rC[2][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[8*ldc] = alpha*rC[3][1] ;
+    C[16*ldc] = alpha*rC[3][2];
+    C[24*ldc] = alpha*rC[3][3];
+    C[32*ldc] = alpha*rC[3][4];
+    C[40*ldc] = alpha*rC[3][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[4][0] ;
+    C[8*ldc] = alpha*rC[4][1] ;
+    C[16*ldc] = alpha*rC[4][2];
+    C[24*ldc] = alpha*rC[4][3];
+    C[32*ldc] = alpha*rC[4][4];
+    C[40*ldc] = alpha*rC[4][5];
+    C+=8;                     
+    C[0*ldc] = alpha*rC[5][0] ;
+    C[8*ldc] = alpha*rC[5][1] ;
+    C[16*ldc] = alpha*rC[5][2];
+    C[24*ldc] = alpha*rC[5][3];
+    C[32*ldc] = alpha*rC[5][4];
+    C[40*ldc] = alpha*rC[5][5];
+    
+}
+";
+
+static const char * dgemm_TN_1_1_1_8x16_4x2__ALPHABETA = " 
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_1_1_1_8x16_4x2__ALPHABETA(  __global double const * restrict A, 
+                                        __global double const * restrict B,
+                                        __global double * C, 
+                                        uint const M, 
+                                        uint const N, 
+                                        uint const K,
+                                        double const alpha,  
+                                        double const beta,
+                                        uint lda,
+                                        uint ldb, 
+                                        uint ldc, 
+                                        uint const offsetA, 
+                                        uint const offsetB, 
+                                        uint const offsetC)
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
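+    // General-size TN kernel: a 32x32 tile of C per 8x16 work-group and a 4x2
+    // micro-tile per work-item.  lA/lB stage 16x32 slices with a row stride of
+    // 33 (16*33 = 528).  Work-groups on the last row/column of the grid take
+    // the guarded path below, which zero-pads out-of-range loads and
+    // bounds-checks the stores to C.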
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+   
+    __local double* plA = lA + idxT*33+ idyT;
+    __local double* plB = lB + idxT*33+ idyT;
+
+
+    if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+    {
+      int CurrentOffSetA =  idyT;
+      int CurrentOffSetB =  idyT;
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8*lda];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24*lda];
+
+        plB[0] = CurrentOffSetB>=N?0.0:B[0];
+        plB[8] = CurrentOffSetB+8>=N?0.0:B[8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < min(16u, K-block_k); k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+      }
+   
+      C+= gidx*32;
+      C+= idx;
+      C+= gidy*32*ldc;
+      C+= idy*ldc;
+
+      int OffSetCM = gidx*32+idx;
+      int OffSetCN = gidy*32+idy;
+      if(OffSetCM>=M || OffSetCN>=N)
+        return;
+
+      for(int i = 0; i<4; i++)
+      {
+        C[0*ldc] = alpha*rC[i][0] + beta*C[0*ldc];
+
+        if (OffSetCN+16<N)
+          C[16*ldc] = alpha*rC[i][1] + beta*C[16*ldc];
+
+        C+=8;
+        OffSetCM += 8;
+        if(OffSetCM>=M)
+          return;
+      }
+
+    }
+    else
+    {
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0];
+        plA[8] = A[8*lda];
+        plA[16] = A[16*lda];
+        plA[24] = A[24*lda];
+        plB[0] = B[0];
+        plB[8] = B[8*ldb];
+        plB[16] = B[16*ldb];
+        plB[24] = B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < min(16u, K-block_k); k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        
+        B += 16;
+        
+
+      }
+      C+= gidx*32;
+      C+= idx*1;
+      C+= gidy*32*ldc;
+      C+= idy*1*ldc;
+      C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+      C+=8;
+      C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+      C+=8;
+      C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+      C+=8;
+      C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+      
+    }
+}
+"; 
+
+
+static const char * dgemm_TN_1_1_1_8x16_4x2__ALPHA = " 
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_1_1_1_8x16_4x2__ALPHA(  __global double const * restrict A, 
+                                        __global double const * restrict B,
+                                        __global double * C, 
+                                        uint const M, 
+                                        uint const N, 
+                                        uint const K,
+                                        double const alpha,
+                                        uint lda,
+                                        uint ldb, 
+                                        uint ldc, 
+                                        uint const offsetA, 
+                                        uint const offsetB, 
+                                        uint const offsetC)
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+   
+    __local double* plA = lA + idxT*33+ idyT;
+    __local double* plB = lB + idxT*33+ idyT;
+
+
+    if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+    {
+      int CurrentOffSetA =  idyT;
+      int CurrentOffSetB =  idyT;
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8*lda];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24*lda];
+
+        plB[0] = CurrentOffSetB>=N?0.0:B[0];
+        plB[8] = CurrentOffSetB+8>=N?0.0:B[8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < min(16u, K-block_k); k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+      }
+   
+      C+= gidx*32;
+      C+= idx;
+      C+= gidy*32*ldc;
+      C+= idy*ldc;
+
+      int OffSetCM = gidx*32+idx;
+      int OffSetCN = gidy*32+idy;
+      if(OffSetCM>=M || OffSetCN>=N)
+        return;
+
+      for(int i = 0; i<4; i++)
+      {
+        C[0*ldc] = alpha*rC[i][0];
+
+        if (OffSetCN+16<N)
+          C[16*ldc] = alpha*rC[i][1];
+
+        C+=8;
+        OffSetCM += 8;
+        if(OffSetCM>=M)
+          return;
+      }
+
+    }
+    else
+    {
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0];
+        plA[8] = A[8*lda];
+        plA[16] = A[16*lda];
+        plA[24] = A[24*lda];
+        plB[0] = B[0];
+        plB[8] = B[8*ldb];
+        plB[16] = B[16*ldb];
+        plB[24] = B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < min(16u, K-block_k); k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        
+        B += 16;
+        
+
+      }
+      C+= gidx*32;
+      C+= idx;
+      C+= gidy*32*ldc;
+      C+= idy*ldc;
+      C[0*ldc] = alpha*rC[0][0];
+      C[16*ldc] = alpha*rC[0][1];
+      C+=8;
+      C[0*ldc] = alpha*rC[1][0];
+      C[16*ldc] = alpha*rC[1][1];
+      C+=8;
+      C[0*ldc] = alpha*rC[2][0];
+      C[16*ldc] = alpha*rC[2][1];
+      C+=8;
+      C[0*ldc] = alpha*rC[3][0];
+      C[16*ldc] = alpha*rC[3][1];
+      
+    }
+}
+"; 
+
+static const char * dgemm_TN_1_1_16_8x16_4x2__ALPHABETA = "
+
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_1_1_16_8x16_4x2__ALPHABETA(  __global double const * restrict A, 
+                                         __global double const * restrict B,
+                                         __global double * C,  
+                                         uint const M, 
+                                         uint const N, 
+                                         uint const K,
+                                         double const alpha,  
+                                         double const beta,   
+                                         uint lda,
+                                         uint ldb, 
+                                         uint ldc, 
+                                         uint const offsetA, 
+                                         uint const offsetB, 
+                                         uint const offsetC)
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
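+    // Same 32x32 / 4x2 tiling as the 1_1_1 kernel above, but the inner k-loop
+    // always runs 16 iterations, so K is assumed to be a multiple of 16;
+    // only the M and N edges are guarded.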
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+   
+    __local double* plA = lA + idxT*33+ idyT;
+    __local double* plB = lB + idxT*33+ idyT;
+
+
+    if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+    {
+      int CurrentOffSetA =  idyT;
+      int CurrentOffSetB =  idyT;
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8*lda];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24*lda];
+
+        plB[0] = CurrentOffSetB>=N?0.0:B[0];
+        plB[8] = CurrentOffSetB+8>=N?0.0:B[8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < 16; k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+      }
+   
+      C+= gidx*32;
+      C+= idx;
+      C+= gidy*32*ldc;
+      C+= idy*ldc;
+
+      int OffSetCM = gidx*32+idx;
+      int OffSetCN = gidy*32+idy;
+      if(OffSetCM>=M || OffSetCN>=N)
+        return;
+
+
+
+      for(int i = 0; i<4; i++)
+      {
+        C[0*ldc] = alpha*rC[i][0] + beta*C[0*ldc];
+
+        if (OffSetCN+16<N)
+          C[16*ldc] = alpha*rC[i][1] + beta*C[16*ldc];
+
+        C+=8;
+        OffSetCM += 8;
+        if(OffSetCM>=M)
+          return;
+      }
+
+    }
+    else
+    {
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0];
+        plA[8] = A[8*lda];
+        plA[16] = A[16*lda];
+        plA[24] = A[24*lda];
+        plB[0] = B[0];
+        plB[8] = B[8*ldb];
+        plB[16] = B[16*ldb];
+        plB[24] = B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < 16; k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+
+      }
+      C+= gidx*32;
+      C+= idx*1;
+      C+= gidy*32*ldc;
+      C+= idy*1*ldc;
+      C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+      C+=8;
+      C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+      C+=8;
+      C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+      C+=8;
+      C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+      C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+      C+=8;
+    }
+}
+"; 
+
+
+static const char * dgemm_TN_1_1_16_8x16_4x2__ALPHA = "
+
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_1_1_16_8x16_4x2__ALPHA(  __global double const * restrict A, 
+                                         __global double const * restrict B,
+                                         __global double * C,  
+                                         uint const M, 
+                                         uint const N, 
+                                         uint const K,
+                                         double const alpha,  
+                                         uint lda,
+                                         uint ldb, 
+                                         uint ldc, 
+                                         uint const offsetA, 
+                                         uint const offsetB, 
+                                         uint const offsetC)
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+   
+    __local double* plA = lA + idxT*33+ idyT;
+    __local double* plB = lB + idxT*33+ idyT;
+
+
+    if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+    {
+      int CurrentOffSetA =  idyT;
+      int CurrentOffSetB =  idyT;
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8*lda];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24*lda];
+
+        plB[0] = CurrentOffSetB>=N?0.0:B[0];
+        plB[8] = CurrentOffSetB+8>=N?0.0:B[8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < 16; k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+      }
+   
+      C+= gidx*32;
+      C+= idx;
+      C+= gidy*32*ldc;
+      C+= idy*ldc;
+
+      int OffSetCM = gidx*32+idx;
+      int OffSetCN = gidy*32+idy;
+      if(OffSetCM>=M || OffSetCN>=N)
+        return;
+
+
+
+      for(int i = 0; i<4; i++)
+      {
+        C[0*ldc] = alpha*rC[i][0];
+
+        if (OffSetCN+16<N)
+          C[16*ldc] = alpha*rC[i][1];
+
+        C+=8;
+        OffSetCM += 8;
+        if(OffSetCM>=M)
+          return;
+      }
+
+    }
+    else
+    {
+
+      for( int block_k=0 ; block_k< K ; block_k+=16)
+      {
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0];
+        plA[8] = A[8*lda];
+        plA[16] = A[16*lda];
+        plA[24] = A[24*lda];
+        plB[0] = B[0];
+        plB[8] = B[8*ldb];
+        plB[16] = B[16*ldb];
+        plB[24] = B[24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx;
+        int offB = idy;
+
+        for( int k = 0 ; k < 16; k+=1)
+        {
+          rA[0][0] = lA[offA ];
+          rA[0][1] = lA[offA + 8];
+          rA[0][2] = lA[offA + 16];
+          rA[0][3] = lA[offA + 24];
+          rB[0][0] = lB[offB ];
+          rB[0][1] = lB[offB + 16];
+          offA += 33;
+          offB += 33;
+          rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+          rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+          rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+          rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+          rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+          rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+          rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+          rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+
+      }
+      C+= gidx*32;
+      C+= idx*1;
+      C+= gidy*32*ldc;
+      C+= idy*1*ldc;
+      C[0*ldc] = alpha*rC[0][0];
+      C[16*ldc] = alpha*rC[0][1];
+      C+=8;
+      C[0*ldc] = alpha*rC[1][0];
+      C[16*ldc] = alpha*rC[1][1];
+      C+=8;
+      C[0*ldc] = alpha*rC[2][0];
+      C[16*ldc] = alpha*rC[2][1];
+      C+=8;
+      C[0*ldc] = alpha*rC[3][0];
+      C[16*ldc] = alpha*rC[3][1];
+      
+    }
+}
+"; 
+
+
+static const char * dgemm_TN_32_32_1_8x16_4x2__ALPHABETA = "
+
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_32_32_1_8x16_4x2__ALPHABETA(  __global double const * restrict A, 
+                                          __global double const * restrict B,
+                                          __global double * C,  
+                                          uint const M, 
+                                          uint const N, 
+                                          uint const K,
+                                          double const alpha,  
+                                          double const beta,   
+                                          uint lda,
+                                          uint ldb, 
+                                          uint ldc, 
+                                          uint const offsetA, 
+                                          uint const offsetB, 
+                                          uint const offsetC)
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
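+    // Here M and N are assumed to be multiples of 32 (no edge guards); only K
+    // is arbitrary, bounded by min(16u, K-block_k) in the inner loop.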
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+    
+    for( int block_k=0 ; block_k< K ; block_k+=16)
+    {
+        __local double* plA = lA + idxT*33+ idyT;
+        __local double* plB = lB + idxT*33+ idyT;
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0+0*lda];
+        plA[8] = A[0+8*lda];
+        plA[16] = A[0+16*lda];
+        plA[24] = A[0+24*lda];
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+        
+        for( int k = 0 ; k < min(16u, K-block_k); k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 16];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+    }
+    C+= gidx*32;
+    C+= idx*1;
+    C+= gidy*32*ldc;
+    C+= idy*1*ldc;
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C+=8;
+}
+
+"; 
+
+
+static const char * dgemm_TN_32_32_1_8x16_4x2__ALPHA = "
+
+
+__attribute__( (reqd_work_group_size(8, 16, 1)) )
+__kernel void dgemm_TN_32_32_1_8x16_4x2__ALPHA(  __global double const * restrict A, 
+                                          __global double const * restrict B,
+                                          __global double * C,  
+                                          uint const M, 
+                                          uint const N, 
+                                          uint const K,
+                                          double const alpha,  
+                                          uint lda,
+                                          uint ldb, 
+                                          uint ldc, 
+                                          uint const offsetA, 
+                                          uint const offsetB, 
+                                          uint const offsetC)
+{
+    double rC[4][2]  = {(double)0};
+    double rA[1][4];
+    double rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local double lA[528];
+    __local double lB[528];
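+    // Identical to the _ALPHABETA kernel above except that beta is dropped and
+    // C is never read in the epilogue.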
+    
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx = get_local_id(0);
+    int idy = get_local_id(1);
+    
+    int idt = 8*idy + idx;
+    int idxT = idt % 16;
+    int idyT = idt / 16;
+    
+    A +=  gidx*32*lda + idxT + idyT*lda;
+    B +=  gidy*32*ldb+ idxT + idyT*ldb;
+    
+    for( int block_k=0 ; block_k< K ; block_k+=16)
+    {
+        __local double* plA = lA + idxT*33+ idyT;
+        __local double* plB = lB + idxT*33+ idyT;
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        plA[0] = A[0+0*lda];
+        plA[8] = A[0+8*lda];
+        plA[16] = A[0+16*lda];
+        plA[24] = A[0+24*lda];
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+       
+        int offA = 1*idx;
+        int offB = 1*idy;
+        
+        for( int k = 0 ; k < min(16u, K-block_k); k+=1)
+        {
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 8];
+            rA[0][2] = lA[offA + 16];
+            rA[0][3] = lA[offA + 24];
+            rB[0][0] = lB[offB + 0];
+            rB[0][1] = lB[offB + 16];
+            offA += 33;
+            offB += 33;
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        }
+        A += 16;
+        B += 16;
+    }
+    C+= gidx*32;
+    C+= idx*1;
+    C+= gidy*32*ldc;
+    C+= idy*1*ldc;
+    C[0*ldc] = alpha*rC[0][0];
+    C[16*ldc] = alpha*rC[0][1];
+    C+=8;
+    C[0*ldc] = alpha*rC[1][0];
+    C[16*ldc] = alpha*rC[1][1];
+    C+=8;
+    C[0*ldc] = alpha*rC[2][0];
+    C[16*ldc] = alpha*rC[2][1];
+    C+=8;
+    C[0*ldc] = alpha*rC[3][0];
+    C[16*ldc] = alpha*rC[3][1];
+    
+}
+
+"; 
+
+static const char * dgemm_NN_48_48_8_8x8_6x6__ALPHABETA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_48_48_8_8x8_6x6__ALPHABETA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  double const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[6][6]  = {(double)0};
+  double rA[1][6];
+  double rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[392];
+  __local double lB[392];
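+  // NN variant: a 48x48 tile of C per 8x8 work-group, 6x6 per work-item.
+  // A is read untransposed (48 rows by 8 k-values) and B by columns
+  // (8 k-values by 48 columns); both are staged in local memory with a row
+  // stride of 49 (8*49 = 392).  K is assumed to be a multiple of 8.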
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+  __local double* plA = lA + idyT*49 + idxT;
+  __local double* plB = lB + idxT*49 + idyT;
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0] = A[0+0*lda];
+    plA[8] = A[8+0*lda];
+    plA[16] = A[16+0*lda];
+    plA[24] = A[24+0*lda];
+    plA[32] = A[32+0*lda];
+    plA[40] = A[40+0*lda];
+    plB[0] = B[0+0*ldb];
+    plB[8] = B[0+8*ldb];
+    plB[16] = B[0+16*ldb];
+    plB[24] = B[0+24*ldb];
+    plB[32] = B[0+32*ldb];
+    plB[40] = B[0+40*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+    
+    int offA = 1*idx;
+    int offB = 1*idy;
+    
+    for( int k = 0 ; k < 8; k+=1){
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rA[0][4] = lA[offA + 32];
+      rA[0][5] = lA[offA + 40];
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      rB[0][4] = lB[offB + 32];
+      rB[0][5] = lB[offB + 40];
+      offA += 49;
+      offB += 49;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+      rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]);
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+      rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]);
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+      rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]);
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+      rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]);
+      rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+      rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+      rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+      rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+      rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+      rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]);
+      rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]);
+      rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]);
+      rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]);
+      rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]);
+      rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]);
+      rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]);
+    }
+    A += 8*lda;
+    B += 8;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*48;
+  C+= idx;
+  C+= gidy*48*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[0][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[0][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[1][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[1][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[2][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[2][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[3][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[3][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[4][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[4][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[4][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[4][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[4][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[5][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[5][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[5][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[5][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[5][5] + beta*C[40*ldc];
+ 
+}
+
+";
+
+static const char * dgemm_NN_48_48_8_8x8_6x6__ALPHA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_48_48_8_8x8_6x6__ALPHA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[6][6]  = {(double)0};
+  double rA[1][6];
+  double rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+  __local double* plA = lA + idyT*49 + idxT;
+  __local double* plB = lB + idxT*49 + idyT;
+
+
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0] = A[0+0*lda];
+    plA[8] = A[8+0*lda];
+    plA[16] = A[16+0*lda];
+    plA[24] = A[24+0*lda];
+    plA[32] = A[32+0*lda];
+    plA[40] = A[40+0*lda];
+    plB[0] = B[0+0*ldb];
+    plB[8] = B[0+8*ldb];
+    plB[16] = B[0+16*ldb];
+    plB[24] = B[0+24*ldb];
+    plB[32] = B[0+32*ldb];
+    plB[40] = B[0+40*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+    
+    int offA = 1*idx;
+    int offB = 1*idy;
+    
+    for( int k = 0 ; k < 8; k+=1){
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rA[0][4] = lA[offA + 32];
+      rA[0][5] = lA[offA + 40];
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      rB[0][4] = lB[offB + 32];
+      rB[0][5] = lB[offB + 40];
+      offA += 49;
+      offB += 49;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+      rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]);
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+      rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]);
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+      rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]);
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+      rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]);
+      rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+      rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+      rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+      rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+      rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+      rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]);
+      rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]);
+      rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]);
+      rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]);
+      rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]);
+      rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]);
+      rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]);
+    }
+    A += 8*lda;
+    B += 8;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*48;
+  C+= idx;
+  C+= gidy*48*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[8*ldc] = alpha*rC[0][1] ;
+  C[16*ldc] = alpha*rC[0][2];
+  C[24*ldc] = alpha*rC[0][3];
+  C[32*ldc] = alpha*rC[0][4];
+  C[40*ldc] = alpha*rC[0][5];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[8*ldc] = alpha*rC[1][1] ;
+  C[16*ldc] = alpha*rC[1][2];
+  C[24*ldc] = alpha*rC[1][3];
+  C[32*ldc] = alpha*rC[1][4];
+  C[40*ldc] = alpha*rC[1][5];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[8*ldc] = alpha*rC[2][1] ;
+  C[16*ldc] = alpha*rC[2][2];
+  C[24*ldc] = alpha*rC[2][3];
+  C[32*ldc] = alpha*rC[2][4];
+  C[40*ldc] = alpha*rC[2][5];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[3][0] ;
+  C[8*ldc] = alpha*rC[3][1] ;
+  C[16*ldc] = alpha*rC[3][2];
+  C[24*ldc] = alpha*rC[3][3];
+  C[32*ldc] = alpha*rC[3][4];
+  C[40*ldc] = alpha*rC[3][5];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[4][0] ;
+  C[8*ldc] = alpha*rC[4][1] ;
+  C[16*ldc] = alpha*rC[4][2];
+  C[24*ldc] = alpha*rC[4][3];
+  C[32*ldc] = alpha*rC[4][4];
+  C[40*ldc] = alpha*rC[4][5];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[5][0] ;
+  C[8*ldc] = alpha*rC[5][1] ;
+  C[16*ldc] = alpha*rC[5][2];
+  C[24*ldc] = alpha*rC[5][3];
+  C[32*ldc] = alpha*rC[5][4];
+  C[40*ldc] = alpha*rC[5][5];
+ 
+}
+
+";
+
+
+static const char * dgemm_NN_32_32_8_8x8_4x4__ALPHABETA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_32_32_8_8x8_4x4__ALPHABETA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  double const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+  double PreFetchA[4];
+  double PreFetchB[4];
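+  // Software pipelining: each work-item prefetches its next four A and four B
+  // values into registers while the current local tile is being consumed,
+  // then stores them to lA/lB between the two barriers at the bottom of the
+  // block_k loop.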
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+
+  __local double* plA = lA + idyT*33 + idxT;
+  __local double* plB = lB + idxT*33 + idyT;
+
+
+    plA[0] = A[0];
+    plA[8] = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plB[0] = B[0];
+    plB[8] = B[8*ldb];
+    plB[16] = B[16*ldb];
+    plB[24] = B[24*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  for( int block_k=0 ; block_k< K ; block_k+=8)
+  {
+
+   // barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+    PreFetchA[2] = A[16];
+    PreFetchA[3] = A[24];
+
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8*ldb];
+    PreFetchB[2] = B[16*ldb];
+    PreFetchB[3] = B[24*ldb];
+
+   /* plA[0] = A[0+0*lda];
+    plA[8] = A[8+0*lda];
+    plA[16] = A[16+0*lda];
+    plA[24] = A[24+0*lda];
+    plB[0] = B[0+0*ldb];
+    plB[8] = B[0+8*ldb];
+    plB[16] = B[0+16*ldb];
+    plB[24] = B[0+24*ldb];*/
+   // barrier(CLK_LOCAL_MEM_FENCE);
+
+    int offA = idx;
+    int offB = idy;
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      offA += 33;
+      offB += 33;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]  = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    plA[24] = PreFetchA[3];
+    plB[0]  = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    plB[24] = PreFetchB[3];
+    barrier(CLK_LOCAL_MEM_FENCE);
+   /* A += 8*lda;
+    B += 8;*/
+  }
+
+
+  C+= gidx*32;
+  C+= idx;
+  C+= gidy*32*ldc;
+  C+= idy*ldc;
+  
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+  
+}
+";
+
+
+static const char * dgemm_NN_32_32_8_8x8_4x4__ALPHA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_32_32_8_8x8_4x4__ALPHA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+  double PreFetchA[4];
+  double PreFetchB[4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+
+  __local double* plA = lA + idyT*33 + idxT;
+  __local double* plB = lB + idxT*33 + idyT;
+
+
+    plA[0] = A[0];
+    plA[8] = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plB[0] = B[0];
+    plB[8] = B[8*ldb];
+    plB[16] = B[16*ldb];
+    plB[24] = B[24*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  for( int block_k=0 ; block_k< K ; block_k+=8)
+  {
+
+   // barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+    PreFetchA[0] = A[0];
+    PreFetchA[1] = A[8];
+    PreFetchA[2] = A[16];
+    PreFetchA[3] = A[24];
+
+    PreFetchB[0] = B[0];
+    PreFetchB[1] = B[8*ldb];
+    PreFetchB[2] = B[16*ldb];
+    PreFetchB[3] = B[24*ldb];
+
+   /* plA[0] = A[0+0*lda];
+    plA[8] = A[8+0*lda];
+    plA[16] = A[16+0*lda];
+    plA[24] = A[24+0*lda];
+    plB[0] = B[0+0*ldb];
+    plB[8] = B[0+8*ldb];
+    plB[16] = B[0+16*ldb];
+    plB[24] = B[0+24*ldb];*/
+   // barrier(CLK_LOCAL_MEM_FENCE);
+
+    int offA = idx;
+    int offB = idy;
+
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      offA += 33;
+      offB += 33;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]  = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    plA[24] = PreFetchA[3];
+    plB[0]  = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    plB[24] = PreFetchB[3];
+    barrier(CLK_LOCAL_MEM_FENCE);
+   /* A += 8*lda;
+    B += 8;*/
+  }
+
+
+  C+= gidx*32;
+  C+= idx;
+  C+= gidy*32*ldc;
+  C+= idy*ldc;
+  
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[8*ldc] = alpha*rC[0][1] ;
+  C[16*ldc] = alpha*rC[0][2];
+  C[24*ldc] = alpha*rC[0][3];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[8*ldc] = alpha*rC[1][1] ;
+  C[16*ldc] = alpha*rC[1][2];
+  C[24*ldc] = alpha*rC[1][3];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[8*ldc] = alpha*rC[2][1] ;
+  C[16*ldc] = alpha*rC[2][2];
+  C[24*ldc] = alpha*rC[2][3];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[3][0] ;
+  C[8*ldc] = alpha*rC[3][1] ;
+  C[16*ldc] = alpha*rC[3][2];
+  C[24*ldc] = alpha*rC[3][3];
+  
+}
+";
+
+
+static const char * dgemm_NN_1_1_8_8x8_4x4__ALPHABETA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_1_1_8_8x8_4x4__ALPHABETA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  double const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
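+  // General-size NN kernel: 32x32 tile per 8x8 work-group, 4x4 per work-item,
+  // with K assumed to be a multiple of 8.  Work-groups on the last row/column
+  // of the grid zero-pad out-of-range loads and bounds-check their stores to C.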
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+idxT;
+  __local double* plB = lB + idxT*33+ idyT;
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+    int CurrentOffSetA =   gidx*32 + idxT;
+    int CurrentOffSetB =  idyT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+   
+      plA[0] = CurrentOffSetA>=M?0.0:A[0];
+      plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+      plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+     
+      plB[0] = CurrentOffSetB>=N?0.0:B[0];
+      plB[8] = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+      plB[16] = CurrentOffSetB+16>=N?0.0: B[0+16*ldb];
+      plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+     
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+      return;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0] + beta*C[0];
+       
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1] + beta*C[8*ldc];
+
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2] + beta*C[16*ldc];
+    
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3] + beta*C[24*ldc];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+        return;
+    }
+  }
+
+  else
+  {
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0+0*lda];
+      plA[8] = A[8+0*lda];
+      plA[16] = A[16+0*lda];
+      plA[24] = A[24+0*lda];
+      plB[0] = B[0+0*ldb];
+      plB[8] = B[0+8*ldb];
+      plB[16] = B[0+16*ldb];
+      plB[24] = B[0+24*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+      
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0] + beta*C[0];
+      C[8*ldc] = alpha*rC[i][1] + beta*C[8*ldc];
+      C[16*ldc] = alpha*rC[i][2] + beta*C[16*ldc];
+      C[24*ldc] = alpha*rC[i][3] + beta*C[24*ldc];
+      C+=8;
+    }
+  }
+}
+";
+
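+/* Note on the dgemm_NN_*_8x8_4x4 kernels above and below: each 8x8 work-group
+   computes a 32x32 tile of C, with every work item accumulating a 4x4 register
+   block. A and B panels are staged through 264-double __local tiles (8 rows of
+   33 elements; the extra element per row is presumably padding against LDS bank
+   conflicts). The _1_1_* variants take the guarded branch when the group lies
+   on the M/N edge, zero-filling out-of-range loads and masking the stores to C;
+   a trailing _8 in the name keeps the inner k-loop fixed at 8 (K appears to be
+   assumed a multiple of 8), while _1_1_1 clamps it with min(8u, K - block_k). */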
+static const char * dgemm_NN_1_1_8_8x8_4x4__ALPHA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_1_1_8_8x8_4x4__ALPHA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+idxT;
+  __local double* plB = lB + idxT*33+ idyT;
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+    int CurrentOffSetA =   gidx*32 + idxT;
+    int CurrentOffSetB =  idyT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+   
+      plA[0] = CurrentOffSetA>=M?0.0:A[0];
+      plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+      plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+     
+      plB[0] = CurrentOffSetB>=N?0.0:B[0];
+      plB[8] = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+      plB[16] = CurrentOffSetB+16>=N?0.0: B[0+16*ldb];
+      plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+     
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+      return;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0];
+       
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1];
+
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2];
+    
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+        return;
+    }
+  }
+
+  else
+  {
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0+0*lda];
+      plA[8] = A[8+0*lda];
+      plA[16] = A[16+0*lda];
+      plA[24] = A[24+0*lda];
+      plB[0] = B[0+0*ldb];
+      plB[8] = B[0+8*ldb];
+      plB[16] = B[0+16*ldb];
+      plB[24] = B[0+24*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+      
+      for( int k = 0 ; k < 8; k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0];
+      C[8*ldc] = alpha*rC[i][1];
+      C[16*ldc] = alpha*rC[i][2] ;
+      C[24*ldc] = alpha*rC[i][3];
+      C+=8;
+    }
+  }
+}
+";
+
+static const char * dgemm_NN_1_1_1_8x8_4x4__ALPHABETA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_1_1_1_8x8_4x4__ALPHABETA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  double const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+idxT;
+  __local double* plB = lB + idxT*33+ idyT;
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+    int CurrentOffSetA =   gidx*32 + idxT;
+    int CurrentOffSetB =  idyT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+   
+      plA[0] = CurrentOffSetA>=M?0.0:A[0];
+      plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+      plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+     
+      plB[0] = CurrentOffSetB>=N?0.0:B[0];
+      plB[8] = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+      plB[16] = CurrentOffSetB+16>=N?0.0: B[0+16*ldb];
+      plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+     
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+      return;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0] + beta*C[0];
+       
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1] + beta*C[8*ldc];
+
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2] + beta*C[16*ldc];
+    
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3] + beta*C[24*ldc];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+        return;
+    }
+  }
+
+  else
+  {
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0+0*lda];
+      plA[8] = A[8+0*lda];
+      plA[16] = A[16+0*lda];
+      plA[24] = A[24+0*lda];
+      plB[0] = B[0+0*ldb];
+      plB[8] = B[0+8*ldb];
+      plB[16] = B[0+16*ldb];
+      plB[24] = B[0+24*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+      
+      int offA = idx;
+      int offB = idy;
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0] + beta*C[0];
+      C[8*ldc] = alpha*rC[i][1] + beta*C[8*ldc];
+      C[16*ldc] = alpha*rC[i][2] + beta*C[16*ldc];
+      C[24*ldc] = alpha*rC[i][3] + beta*C[24*ldc];
+      C+=8;
+    }
+  }
+}
+";
+
+
+static const char * dgemm_NN_1_1_1_8x8_4x4__ALPHA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_1_1_1_8x8_4x4__ALPHA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+idxT;
+  __local double* plB = lB + idxT*33+ idyT;
+
+  if(gidx==get_num_groups(0)-1 || gidy==get_num_groups(1)-1 )
+  {
+    int CurrentOffSetA =   gidx*32 + idxT;
+    int CurrentOffSetB =  idyT;
+
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+   
+      plA[0] = CurrentOffSetA>=M?0.0:A[0];
+      plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+      plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+      plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+     
+      plB[0] = CurrentOffSetB>=N?0.0:B[0];
+      plB[8] = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+      plB[16] = CurrentOffSetB+16>=N?0.0: B[0+16*ldb];
+      plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+     
+      barrier(CLK_LOCAL_MEM_FENCE);
+     
+      int offA = idx;
+      int offB = idy;
+
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+    int OffSetCM = gidx*32+idx;
+    int OffSetCN = gidy*32+idy;
+    if(OffSetCM>=M || OffSetCN>=N)
+      return;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0];
+       
+      if (OffSetCN+8<N)
+        C[8*ldc] = alpha*rC[i][1];
+
+      if (OffSetCN+16<N)
+        C[16*ldc] = alpha*rC[i][2];
+    
+      if (OffSetCN+24<N)
+        C[24*ldc] = alpha*rC[i][3];
+
+      C+=8;
+      OffSetCM += 8;
+      if(OffSetCM>=M)
+        return;
+    }
+  }
+
+  else
+  {
+    for( int block_k=0 ; block_k< K ; block_k+=8)
+    {
+      barrier(CLK_LOCAL_MEM_FENCE);
+      plA[0] = A[0+0*lda];
+      plA[8] = A[8+0*lda];
+      plA[16] = A[16+0*lda];
+      plA[24] = A[24+0*lda];
+      plB[0] = B[0+0*ldb];
+      plB[8] = B[0+8*ldb];
+      plB[16] = B[0+16*ldb];
+      plB[24] = B[0+24*ldb];
+      barrier(CLK_LOCAL_MEM_FENCE);
+      
+      int offA = idx;
+      int offB = idy;
+      for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+      {
+        rA[0][0] = lA[offA + 0];
+        rA[0][1] = lA[offA + 8];
+        rA[0][2] = lA[offA + 16];
+        rA[0][3] = lA[offA + 24];
+        rB[0][0] = lB[offB + 0];
+        rB[0][1] = lB[offB + 8];
+        rB[0][2] = lB[offB + 16];
+        rB[0][3] = lB[offB + 24];
+        offA += 33;
+        offB += 33;
+        rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+        rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+        rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+        rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+        rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+        rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+        rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+        rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+        rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+        rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+        rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+        rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+        rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+        rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+        rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+        rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      }
+      A += 8*lda;
+      B += 8;
+    }
+    C+= gidx*32;
+    C+= idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+
+
+
+    for (int i = 0; i<4; i++)
+    {
+      C[0] = alpha*rC[i][0];
+      C[8*ldc] = alpha*rC[i][1];
+      C[16*ldc] = alpha*rC[i][2];
+      C[24*ldc] = alpha*rC[i][3];
+      C+=8;
+    }
+  }
+}
+";
+
+
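+/* The _32_32_1 variants below appear to assume M and N are multiples of 32, so
+   the C store needs no edge masking. They also software-pipeline the global
+   loads: the next 8-wide panel of A and B is prefetched into registers while
+   the current __local tile is being consumed, then copied into local memory
+   between barriers at the end of each block_k iteration. */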
+static const char * dgemm_NN_32_32_1_8x8_4x4__ALPHABETA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_32_32_1_8x8_4x4__ALPHABETA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  double const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+  double PreFetchA [4];
+  double PreFetchB [4];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+1*idxT;
+  __local double* plB = lB + idxT*33+ idyT;
+
+
+  plA[0]  = A[0];
+  plA[8]  = A[8];
+  plA[16] = A[16];
+  plA[24] = A[24];
+  plB[0]  = B[0];
+  plB[8]  = B[0+8*ldb];
+  plB[16] = B[0+16*ldb];
+  plB[24] = B[0+24*ldb];
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for( int block_k=0 ; block_k< K ; block_k+=8)
+  {
+    
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+	PreFetchA[0] = A[0];
+	PreFetchA[1] = A[8];
+	PreFetchA[2] = A[16];
+	PreFetchA[3] = A[24];
+	PreFetchB[0] = B[0];
+	PreFetchB[1] = B[8*ldb];
+	PreFetchB[2] = B[16*ldb];
+	PreFetchB[3] = B[24*ldb];
+
+    int offA = idx;
+    int offB = idy;
+
+    for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      offA += 33;
+      offB += 33;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+    }
+
+	barrier(CLK_LOCAL_MEM_FENCE);
+	plA[0]  = PreFetchA[0]; 
+	plA[8]  = PreFetchA[1];
+	plA[16] = PreFetchA[2];
+	plA[24] = PreFetchA[3];
+	plB[0]  = PreFetchB[0];
+	plB[8]  = PreFetchB[1];
+	plB[16] = PreFetchB[2];
+	plB[24] = PreFetchB[3];
+	barrier(CLK_LOCAL_MEM_FENCE);
+    
+  }
+
+  C+= gidx*32;
+  C+= idx*1;
+  C+= gidy*32*ldc;
+  C+= idy*1*ldc;
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+  C+=8;
+}
+";
+
+static const char * dgemm_NN_32_32_1_8x8_4x4__ALPHA = "
+__attribute__( (reqd_work_group_size(8, 8, 1)) )
+  __kernel void dgemm_NN_32_32_1_8x8_4x4__ALPHA(__global double const * restrict A,
+  __global double const * restrict B,
+  __global double * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  double const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  double rC[4][4]  = {(double)0};
+  double rA[1][4];
+  double rB[1][4];
+
+  double PreFetchA [4];
+  double PreFetchB [4];
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[264];
+  __local double lB[264];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*32+ idxT + idyT*lda;
+  B +=  gidy*32*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*33+1*idxT;
+  __local double* plB = lB + idxT*33+ idyT;
+
+
+  plA[0]  = A[0];
+  plA[8]  = A[8];
+  plA[16] = A[16];
+  plA[24] = A[24];
+  plB[0]  = B[0];
+  plB[8]  = B[0+8*ldb];
+  plB[16] = B[0+16*ldb];
+  plB[24] = B[0+24*ldb];
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  for( int block_k=0 ; block_k< K ; block_k+=8)
+  {
+    
+    //barrier(CLK_LOCAL_MEM_FENCE);
+    A += 8*lda;
+    B += 8;
+
+	PreFetchA[0] = A[0];
+	PreFetchA[1] = A[8];
+	PreFetchA[2] = A[16];
+	PreFetchA[3] = A[24];
+	PreFetchB[0] = B[0];
+	PreFetchB[1] = B[8*ldb];
+	PreFetchB[2] = B[16*ldb];
+	PreFetchB[3] = B[24*ldb];
+
+    int offA = idx;
+    int offB = idy;
+
+    for( int k = 0 ; k < min(8u, K-block_k); k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      offA += 33;
+      offB += 33;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+    }
+
+	barrier(CLK_LOCAL_MEM_FENCE);
+	plA[0]  = PreFetchA[0]; 
+	plA[8]  = PreFetchA[1];
+	plA[16] = PreFetchA[2];
+	plA[24] = PreFetchA[3];
+	plB[0]  = PreFetchB[0];
+	plB[8]  = PreFetchB[1];
+	plB[16] = PreFetchB[2];
+	plB[24] = PreFetchB[3];
+	barrier(CLK_LOCAL_MEM_FENCE);
+    
+  }
+
+  C+= gidx*32;
+  C+= idx;
+  C+= gidy*32*ldc;
+  C+= idy*ldc;
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[8*ldc] = alpha*rC[0][1] ;
+  C[16*ldc] = alpha*rC[0][2];
+  C[24*ldc] = alpha*rC[0][3];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[8*ldc] = alpha*rC[1][1] ;
+  C[16*ldc] = alpha*rC[1][2];
+  C[24*ldc] = alpha*rC[1][3];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[8*ldc] = alpha*rC[2][1] ;
+  C[16*ldc] = alpha*rC[2][2];
+  C[24*ldc] = alpha*rC[2][3];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[3][0] ;
+  C[8*ldc] = alpha*rC[3][1] ;
+  C[16*ldc] = alpha*rC[3][2];
+  C[24*ldc] = alpha*rC[3][3];
+  
+}
+";
+
+
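+/* The _40_40_8 variants below use the same register prefetching scheme but with
+   a 40x40 tile of C per 8x8 work-group (a 5x5 register block per work item),
+   320-double local tiles laid out as 8 rows of 40, and a do/while over K >> 3
+   iterations, i.e. K is assumed to be a multiple of 8. */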
+static const char * dgemm_NN_40_40_8_8x8_5x5__ALPHABETA = "
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NN_40_40_8_8x8_5x5__ALPHABETA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha,  
+  double const beta,   
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[5][5]  = {(double)0};
+  double rA[1][5];
+  double rB[1][5];
+
+  double PreFetchA[5];
+  double PreFetchB[5];
+
+  //double PreFetchA_5;
+  //double PreFetchB_5;
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[320];
+  __local double lB[320];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*40+ idxT + idyT*lda;
+  B +=  gidy*40*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*40+idxT;
+  __local double* plB = lB + idxT*40+idyT;
+
+
+  plA[0]  = A[0];
+  plA[8]  = A[8];
+  plA[16] = A[16];
+  plA[24] = A[24];
+  plA[32] = A[32]; 
+  plB[0]  = B[0];
+  plB[8]  = B[8*ldb];
+  plB[16] = B[16*ldb];
+  plB[24] = B[24*ldb];
+  plB[32] = B[32*ldb];
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+ /*   barrier(CLK_LOCAL_MEM_FENCE);
+
+
+    plA[0] = A[0];
+    plA[8] = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plA[32] = A[32]; 
+    plB[0] = B[0];
+    plB[8] = B[8*ldb];
+    plB[16] = B[16*ldb];
+    plB[24] = B[24*ldb];
+    plB[32] = B[32*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+*/
+
+    A += 8*lda;
+    B += 8;
+	 
+    PreFetchA[0] = A[0];
+	PreFetchA[1] = A[8];
+	PreFetchA[2] = A[16];
+	PreFetchA[3] = A[24];
+	PreFetchA[4] = A[32]; 
+	PreFetchB[0] = B[0];
+	PreFetchB[1] = B[8*ldb];
+	PreFetchB[2] = B[16*ldb];
+	PreFetchB[3] = B[24*ldb];
+	PreFetchB[4] = B[32*ldb];
+    
+    
+
+
+
+    int offA = idx;
+    int offB = idy;
+   // int off256 = 256;
+
+
+#pragma unroll 1
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rA[0][4] = lA[offA + 32];
+
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      rB[0][4] = lB[offB + 32];
+
+
+      offA += 40;
+      offB += 40;
+     // off256 -= 24;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+
+      rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+      rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+      rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+      rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+      rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]  = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    plA[24] = PreFetchA[3];
+    plA[32] = PreFetchA[4];
+    plB[0]  = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    plB[24] = PreFetchB[3];
+    plB[32] = PreFetchB[4];
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    //  A += 8*lda;
+    //  B += 8;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*40;
+  C+= idx;
+  C+= gidy*40*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[0][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[1][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[2][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[3][4] + beta*C[32*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[4][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[4][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[4][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[4][4] + beta*C[32*ldc];
+
+}
+";
+
+
+static const char * dgemm_NN_40_40_8_8x8_5x5__ALPHA = "
+__attribute__((reqd_work_group_size(8,8,1)))
+  __kernel void dgemm_NN_40_40_8_8x8_5x5__ALPHA( __global double const * restrict A, 
+  __global double const * restrict B,
+  __global double * C,  
+  uint const M, 
+  uint const N, 
+  uint const K,
+  double const alpha,  
+  uint lda,
+  uint ldb, 
+  uint ldc, 
+  uint const offsetA,
+  uint const offsetB,
+  uint const offsetC)
+{
+  double rC[5][5]  = {(double)0};
+  double rA[1][5];
+  double rB[1][5];
+
+  double PreFetchA[5];
+  double PreFetchB[5];
+
+  //double PreFetchA_5;
+  //double PreFetchB_5;
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local double lA[320];
+  __local double lB[320];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*40+ idxT + idyT*lda;
+  B +=  gidy*40*ldb+ idxT + idyT*ldb;
+
+  __local double* plA = lA + idyT*40+idxT;
+  __local double* plB = lB + idxT*40+idyT;
+
+
+  plA[0]  = A[0];
+  plA[8]  = A[8];
+  plA[16] = A[16];
+  plA[24] = A[24];
+  plA[32] = A[32]; 
+  plB[0]  = B[0];
+  plB[8]  = B[8*ldb];
+  plB[16] = B[16*ldb];
+  plB[24] = B[24*ldb];
+  plB[32] = B[32*ldb];
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  
+  //for( int block_k=0 ; block_k< K ; block_k+=8)
+  uint block_k = K >> 3;
+  do
+  {
+ /*   barrier(CLK_LOCAL_MEM_FENCE);
+
+
+    plA[0] = A[0];
+    plA[8] = A[8];
+    plA[16] = A[16];
+    plA[24] = A[24];
+    plA[32] = A[32]; 
+    plB[0] = B[0];
+    plB[8] = B[8*ldb];
+    plB[16] = B[16*ldb];
+    plB[24] = B[24*ldb];
+    plB[32] = B[32*ldb];
+    barrier(CLK_LOCAL_MEM_FENCE);
+*/
+
+    A += 8*lda;
+    B += 8;
+	 
+    PreFetchA[0] = A[0];
+	PreFetchA[1] = A[8];
+	PreFetchA[2] = A[16];
+	PreFetchA[3] = A[24];
+	PreFetchA[4] = A[32]; 
+	PreFetchB[0] = B[0];
+	PreFetchB[1] = B[8*ldb];
+	PreFetchB[2] = B[16*ldb];
+	PreFetchB[3] = B[24*ldb];
+	PreFetchB[4] = B[32*ldb];
+    
+    
+
+
+
+    int offA = idx;
+    int offB = idy;
+   // int off256 = 256;
+
+
+#pragma unroll 1
+    for( int k = 0 ; k < 8; k+=1)
+    {
+      rA[0][0] = lA[offA + 0];
+      rA[0][1] = lA[offA + 8];
+      rA[0][2] = lA[offA + 16];
+      rA[0][3] = lA[offA + 24];
+      rA[0][4] = lA[offA + 32];
+
+      rB[0][0] = lB[offB + 0];
+      rB[0][1] = lB[offB + 8];
+      rB[0][2] = lB[offB + 16];
+      rB[0][3] = lB[offB + 24];
+      rB[0][4] = lB[offB + 32];
+
+
+      offA += 40;
+      offB += 40;
+     // off256 -= 24;
+      rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]);
+      rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]);
+      rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]);
+      rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]);
+      rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]);
+
+      rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]);
+      rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]);
+      rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]);
+      rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]);
+      rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]);
+
+      rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]);
+      rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]);
+      rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]);
+      rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]);
+      rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]);
+
+      rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]);
+      rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]);
+      rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]);
+      rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]);
+      rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]);
+
+      rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]);
+      rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]);
+      rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]);
+      rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]);
+      rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]);
+
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    plA[0]  = PreFetchA[0];
+    plA[8]  = PreFetchA[1];
+    plA[16] = PreFetchA[2];
+    plA[24] = PreFetchA[3];
+    plA[32] = PreFetchA[4];
+    plB[0]  = PreFetchB[0];
+    plB[8]  = PreFetchB[1];
+    plB[16] = PreFetchB[2];
+    plB[24] = PreFetchB[3];
+    plB[32] = PreFetchB[4];
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    //  A += 8*lda;
+    //  B += 8;
+  }
+  while (--block_k > 0);
+
+  C+= gidx*40;
+  C+= idx;
+  C+= gidy*40*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[8*ldc] = alpha*rC[0][1] ;
+  C[16*ldc] = alpha*rC[0][2];
+  C[24*ldc] = alpha*rC[0][3];
+  C[32*ldc] = alpha*rC[0][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[8*ldc] = alpha*rC[1][1] ;
+  C[16*ldc] = alpha*rC[1][2];
+  C[24*ldc] = alpha*rC[1][3];
+  C[32*ldc] = alpha*rC[1][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[8*ldc] = alpha*rC[2][1] ;
+  C[16*ldc] = alpha*rC[2][2];
+  C[24*ldc] = alpha*rC[2][3];
+  C[32*ldc] = alpha*rC[2][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[3][0] ;
+  C[8*ldc] = alpha*rC[3][1] ;
+  C[16*ldc] = alpha*rC[3][2];
+  C[24*ldc] = alpha*rC[3][3];
+  C[32*ldc] = alpha*rC[3][4];
+  C+=8;                     
+  C[0*ldc] = alpha*rC[4][0] ;
+  C[8*ldc] = alpha*rC[4][1] ;
+  C[16*ldc] = alpha*rC[4][2];
+  C[24*ldc] = alpha*rC[4][3];
+  C[32*ldc] = alpha*rC[4][4];
+
+}
+";
\ No newline at end of file
diff --git a/src/library/blas/gens/clTemplates/dgemm_hawaiiChannelConfilct.cl b/src/library/blas/gens/clTemplates/dgemm_hawaiiChannelConfilct.cl
new file mode 100644
index 0000000..53cf180
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/dgemm_hawaiiChannelConfilct.cl
@@ -0,0 +1,152 @@
+static const char * dgemm_NT_ChannelConflict = "
+
+typedef union GPtr {
+    __global double *d;
+    __global double2 *d2v;
+    __global double4 *d4v;
+    __global double8 *d8v;
+    __global double16 *d16v;
+} GPtr;
+
+
+
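+/* This NT kernel staggers which slab of B a work-group starts from
+   (get_group_id_1 is derived from the sum of the two group ids modulo the
+   number of groups in dimension 1), apparently to spread simultaneous B reads
+   across memory channels -- hence the ChannelConflict file name. B values are
+   double-buffered through a 128-double __local block across k iterations. */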
+__attribute__((reqd_work_group_size(8, 8, 1)))
+void __kernel
+dgemmBlockTempLocalPrefetch(__global double2 const * restrict A,
+                            __global double2 const * restrict B,
+                            __global double2 * C,
+                            uint const M,
+                            uint const N,
+                            uint const K,
+                            double const alpha,
+                            double const beta,
+                            uint lda,
+                            uint ldb,
+                            uint ldc,
+                            uint offsetA,
+                            uint offsetB,
+                            uint offsetC)
+{
+    double2 a0     ;
+    double b0[8], b1[8];
+	double b0T, b1T;
+    double2 c[8] = {(double2)(0,0)};
+    int4 coord = 0; /* contains coordB, coordA, k */
+
+    lda /= 2;
+    ldb /= 2;
+    
+    int get_group_id_1;
+    int get_global_id_1;
+    A += (int)get_global_id(0);
+    int lidY = get_local_id(1);
+   
+   
+    get_group_id_1 = (get_group_id(0) + get_group_id(1))% get_num_groups(1);
+    get_global_id_1 = get_group_id_1 * get_local_size(1) /*+ get_local_id(1)*/;
+
+    //kif = (N % 256 != 0);
+    //  get_global_id_1 = (kif*(uint)get_global_id(1)) + ((1-kif)*get_global_id_1);
+   
+  
+   
+    B += get_global_id_1 * 4 ;
+	   
+   
+    coord.y = 2 * (int)get_global_id(0);
+    coord.x = 8 * (get_global_id_1+lidY);
+
+	GPtr uB;
+	uB.d2v = B;
+
+	local double blockB [128];
+	
+	int lid = get_local_id(0)+8*lidY; 
+
+	
+	blockB[lid] = uB.d[lid];
+	blockB[lid+64] = uB.d[lid+2*ldb];
+	barrier(CLK_LOCAL_MEM_FENCE);
+	
+
+    for (int k1 = 0; k1 < K; k1 += 2)
+    {
+        /* -- Tiles multiplier -- */
+
+		///barrier(CLK_LOCAL_MEM_FENCE);
+		
+		uB.d2v += (ldb << 1);
+		b0T = uB.d[lid];
+		b1T = uB.d[lid+2*ldb];
+		a0 = A[0];
+		for (int i=0; i<8; i++)
+		{
+          b0[i] = blockB[i+8*lidY];
+		  b1[i] = blockB[i+64+8*lidY];	
+		}
+
+
+        
+		for (int i=0; i<8;i++)
+		  c[i] = mad(a0, b0[i], c[i]);
+
+        a0 = A[lda];
+
+		for (int i=0; i<8;i++)
+		  c[i] = mad(a0, b1[i], c[i]);
+
+
+        A += (lda << 1);
+
+		barrier(CLK_LOCAL_MEM_FENCE);
+		blockB[lid] = b0T;
+	    blockB[lid+64] = b1T;
+		barrier(CLK_LOCAL_MEM_FENCE);
+       // uB.d2v += (ldb << 1);
+        /* ---------------------- */
+    }
+
+    GPtr uC;
+
+    uC.d = C + (coord.x * ldc + coord.y)/2;
+
+    __global double2 *pC = uC.d2v;
+
+    double2 tempC0, tempC1, tempC2, tempC3, tempC4, tempC5, tempC6, tempC7;
+
+    tempC0 = pC[0];
+    tempC1 = pC[(ldc >> 1)];
+    tempC2 = pC[ldc];
+    tempC3 = pC[mad24(3u, (ldc >> 1), 0u)];
+    tempC4 = pC[(ldc << 1)];
+    tempC5 = pC[mad24(5u, (ldc >> 1), 0u)];
+    tempC6 = pC[mad24(6u, (ldc >> 1), 0u)];
+    tempC7 = pC[mad24(7u, (ldc >> 1), 0u)];
+    tempC0 = mad(tempC0, beta, 0);
+    tempC1 = mad(tempC1, beta, 0);
+    tempC2 = mad(tempC2, beta, 0);
+    tempC3 = mad(tempC3, beta, 0);
+    tempC4 = mad(tempC4, beta, 0);
+    tempC5 = mad(tempC5, beta, 0);
+    tempC6 = mad(tempC6, beta, 0);
+    tempC7 = mad(tempC7, beta, 0);
+    tempC0 = mad(c[0], alpha, tempC0);
+    tempC1 = mad(c[1], alpha, tempC1);
+    tempC2 = mad(c[2], alpha, tempC2);
+    tempC3 = mad(c[3], alpha, tempC3);
+    tempC4 = mad(c[4], alpha, tempC4);
+    tempC5 = mad(c[5], alpha, tempC5);
+    tempC6 = mad(c[6], alpha, tempC6);
+    tempC7 = mad(c[7], alpha, tempC7);
+    pC[0] = tempC0;
+    pC[(ldc >> 1)] = tempC1;
+    pC[ldc] = tempC2;
+    pC[mad24(3u, (ldc >> 1), 0u)] = tempC3;
+    pC[(ldc << 1)] = tempC4;
+    pC[mad24(5u, (ldc >> 1), 0u)] = tempC5;
+    pC[mad24(6u, (ldc >> 1), 0u)] = tempC6;
+    pC[mad24(7u, (ldc >> 1), 0u)] = tempC7;
+}
+
+
+";
\ No newline at end of file
diff --git a/src/library/blas/gens/clTemplates/dgemm_hawaiiSplitKernel.cl b/src/library/blas/gens/clTemplates/dgemm_hawaiiSplitKernel.cl
new file mode 100644
index 0000000..9cd28d4
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/dgemm_hawaiiSplitKernel.cl
@@ -0,0 +1,5043 @@
+static const char * dgemm_NT_8_SPLIT__ALPHABETA = "
+//static const char * dgemm_NT_48_48_8_8x8_6x6__ALPHABETA_SPLIT = "
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
+
+
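+/* M6x6 performs one k-step of the 6x6 register-tile update: six values of A
+   and six of B are read from __local memory (offA/offB advance by 48 per
+   step, one row of the local tile), 36 fused multiply-adds accumulate into
+   rC, and a local barrier closes the step. The split kernels below expand it
+   eight times per 8-deep K block. */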
+#define  M6x6 \
+            rA[0] = lA[offA +  0];                        \
+            rA[1] = lA[offA +  1];                        \
+            rA[2] = lA[offA + 16];                        \
+            rA[3] = lA[offA + 17];                        \
+            rA[4] = lA[offA + 32];                        \
+            rA[5] = lA[offA + 33];                        \
+            rB[0] = lB[offB +  0];                        \
+            rB[1] = lB[offB +  1];                        \
+            rB[2] = lB[offB + 16];                        \
+            rB[3] = lB[offB + 17];                        \
+            rB[4] = lB[offB + 32];                        \
+            rB[5] = lB[offB + 33];                        \
+            offA += 48;                                   \
+            offB += 48;                                   \
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);         \
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);         \
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);         \
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);         \
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);         \
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);         \
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);         \
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);         \
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);         \
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);         \
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);         \
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);         \
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);         \
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);         \
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);         \
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);         \
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);         \
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);         \
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);         \
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);         \
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);         \
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);         \
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);         \
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);         \
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);         \
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);         \
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);         \
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);         \
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);         \
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);         \
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);         \
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);         \
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);         \
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);         \
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);         \
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);         \
+            barrier(CLK_LOCAL_MEM_FENCE);
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_48_8_8x8_6x6__ALPHABETA_SPLIT_MAIN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+        __local double2* plA = (__local double2*)(lA + idy*48 + 2*idx);
+        __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+        plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+    (C[(offset_x +  0) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y +  0) * ldc], alpha * rC[0][0]));
+    (C[(offset_x +  1) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y +  0) * ldc], alpha * rC[0][1]));
+    (C[(offset_x +  0) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y +  1) * ldc], alpha * rC[1][0]));
+    (C[(offset_x +  1) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y +  1) * ldc], alpha * rC[1][1]));
+    (C[(offset_x +  0) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 16) * ldc], alpha * rC[2][0]));
+    (C[(offset_x +  1) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 16) * ldc], alpha * rC[2][1]));
+    (C[(offset_x +  0) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 17) * ldc], alpha * rC[3][0]));
+    (C[(offset_x +  1) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 17) * ldc], alpha * rC[3][1]));
+    (C[(offset_x +  0) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 32) * ldc], alpha * rC[4][0]));
+    (C[(offset_x +  1) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 32) * ldc], alpha * rC[4][1]));
+    (C[(offset_x +  0) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 33) * ldc], alpha * rC[5][0]));
+    (C[(offset_x +  1) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 33) * ldc], alpha * rC[5][1]));
+    (C[(offset_x + 16) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y +  0) * ldc], alpha * rC[0][2]));
+    (C[(offset_x + 17) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y +  0) * ldc], alpha * rC[0][3]));
+    (C[(offset_x + 16) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y +  1) * ldc], alpha * rC[1][2]));
+    (C[(offset_x + 17) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y +  1) * ldc], alpha * rC[1][3]));
+    (C[(offset_x + 16) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 16) * ldc], alpha * rC[2][2]));
+    (C[(offset_x + 17) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 16) * ldc], alpha * rC[2][3]));
+    (C[(offset_x + 16) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 17) * ldc], alpha * rC[3][2]));
+    (C[(offset_x + 17) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 17) * ldc], alpha * rC[3][3]));
+    (C[(offset_x + 16) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 32) * ldc], alpha * rC[4][2]));
+    (C[(offset_x + 17) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 32) * ldc], alpha * rC[4][3]));
+    (C[(offset_x + 16) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 33) * ldc], alpha * rC[5][2]));
+    (C[(offset_x + 17) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 33) * ldc], alpha * rC[5][3]));
+    (C[(offset_x + 32) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y +  0) * ldc], alpha * rC[0][4]));
+    (C[(offset_x + 33) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y +  0) * ldc], alpha * rC[0][5]));
+    (C[(offset_x + 32) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y +  1) * ldc], alpha * rC[1][4]));
+    (C[(offset_x + 33) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y +  1) * ldc], alpha * rC[1][5]));
+    (C[(offset_x + 32) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 16) * ldc], alpha * rC[2][4]));
+    (C[(offset_x + 33) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 16) * ldc], alpha * rC[2][5]));
+    (C[(offset_x + 32) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 17) * ldc], alpha * rC[3][4]));
+    (C[(offset_x + 33) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 17) * ldc], alpha * rC[3][5]));
+    (C[(offset_x + 32) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 32) * ldc], alpha * rC[4][4]));
+    (C[(offset_x + 33) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 32) * ldc], alpha * rC[4][5]));
+    (C[(offset_x + 32) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 33) * ldc], alpha * rC[5][4]));
+    (C[(offset_x + 33) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 33) * ldc], alpha * rC[5][5]));
+}
+
+
+
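+/* The SPLIT scheme: the MAIN kernel above handles the grid of full 48x48 C
+   tiles without any bounds checks; the ROW, COLUMN and SINGLE kernels below
+   cover the last row of tiles (M remainder), the last column (N remainder)
+   and the bottom-right corner, fixing gidx to M/48 and/or gidy to N/48 and
+   zero-padding the out-of-range loads while masking the stores to C. */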
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_48_8_8x8_6x6__ALPHABETA_SPLIT_ROW(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+	int CurrentOffSetA = 2*(gidx*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+	
+
+    int block_k = K >> 3;
+    do {
+        __local double* plA = (lA + idy*48 + 2*idx);
+       __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+       
+	    plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+	   
+//	    plB[0 ] = uB.d[0 ];
+//		plB[1 ] = uB.d[1 ];
+//      plB[16] = uB.d[16 ];
+//      plB[17] = uB.d[17];
+//      plB[32] = uB.d[32];
+//		plB[33] = uB.d[33];
+
+
+		plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+		plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+		plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = mad( beta, C[0]     , alpha * rC[0][2*i] );
+    C[ldc]    = mad( beta, C[ldc]   , alpha * rC[1][2*i] );
+    C[16*ldc] = mad( beta, C[16*ldc], alpha * rC[2][2*i] );
+    C[17*ldc] = mad( beta, C[17*ldc], alpha * rC[3][2*i] );
+    C[32*ldc] = mad( beta, C[32*ldc], alpha * rC[4][2*i] );
+    C[33*ldc] = mad( beta, C[33*ldc], alpha * rC[5][2*i] );
+
+	if(offset_x+1>=M )
+	  return;
+	
+	C[1]        = mad( beta, C[1]     , alpha * rC[0][2*i+1] );
+    C[1+ldc]    = mad( beta, C[1+ldc]   , alpha * rC[1][2*i+1] );
+    C[1+16*ldc] = mad( beta, C[1+16*ldc], alpha * rC[2][2*i+1] );
+    C[1+17*ldc] = mad( beta, C[1+17*ldc], alpha * rC[3][2*i+1] );
+    C[1+32*ldc] = mad( beta, C[1+32*ldc], alpha * rC[4][2*i+1] );
+    C[1+33*ldc] = mad( beta, C[1+33*ldc], alpha * rC[5][2*i+1] );
+
+	C+=16;
+	offset_x+=16;
+	if(offset_x>=M )
+       return;
+
+  }
+}
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_1_8_8x8_6x6__ALPHABETA_SPLIT_COLUMN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+	int CurrentOffSetB = 2*(gidy*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+        __local double2* plA = (__local double2*)(lA + idy*48 + 2*idx);
+        __local double* plB = (lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        //plB[0 ] = uB.d2v[0 ];
+        //plB[8 ] = uB.d2v[8 ];
+        //plB[16] = uB.d2v[16];
+       plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+	   plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+	   plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16 ];
+	   plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+	   plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+	   plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+
+	    plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if( offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]   = mad( beta, C[0], alpha * rC[0][2*i] );
+    C[1]   = mad( beta, C[1], alpha * rC[0][2*i+1] );
+
+    if(offset_y+1<N)
+    {
+      C[ldc]   = mad( beta, C[ldc], alpha * rC[1][2*i] );
+      C[1+ldc] = mad( beta, C[1+ldc], alpha * rC[1][2*i+1] );
+    }
+
+    if(offset_y+16<N)
+    {
+      C[16*ldc]   = mad( beta, C[16*ldc], alpha * rC[2][2*i] );
+      C[1+16*ldc] = mad( beta, C[1+16*ldc], alpha * rC[2][2*i+1] );
+    }
+
+    if(offset_y+17<N)
+    {
+      C[17*ldc]   = mad( beta, C[17*ldc], alpha * rC[3][2*i] );
+      C[1+17*ldc] = mad( beta, C[1+17*ldc], alpha * rC[3][2*i+1] );
+    }
+
+    if(offset_y+32<N)
+    {
+      C[32*ldc]   = mad( beta, C[32*ldc], alpha * rC[4][2*i] );
+      C[1+32*ldc] = mad( beta, C[1+32*ldc], alpha * rC[4][2*i+1] );
+    }
+
+    if(offset_y+33<N)
+    {
+      C[33*ldc]   = mad( beta, C[33*ldc], alpha * rC[5][2*i] );
+      C[1+33*ldc] = mad( beta, C[1+33*ldc], alpha * rC[5][2*i+1] );
+    }
+
+    C+=16;
+  }
+}
+
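+// Corner-tail kernel: handles the single tile where the last partial row block
+// (M not a multiple of 48) and the last partial column block (N not a multiple of 48)
+// meet, so both the A and B loads and all C stores are bounds-checked.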
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_8_8x8_6x6__ALPHABETA_SPLIT_SINGLE(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+    int CurrentOffSetA = 2*(gidx*24 + idx);
+    int CurrentOffSetB = 2*(gidy*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+        __local double* plA = (lA + idy*48 + 2*idx);
+        __local double* plB = (lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        //plB[0 ] = uB.d2v[0 ];
+        //plB[8 ] = uB.d2v[8 ];
+        //plB[16] = uB.d2v[16];
+        //plA[0 ] = uA.d2v[0 ];
+        //plA[8 ] = uA.d2v[8 ];
+        //plA[16] = uA.d2v[16];
+
+        plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+        plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+        plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16 ];
+        plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+        plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+        plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+
+        plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+        plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+        plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = mad( beta, C[0]     , alpha * rC[0][2*i] );
+
+    if(offset_y+1<N)
+      C[ldc]    = mad( beta, C[ldc]   , alpha * rC[1][2*i] );
+    if(offset_y+16<N)
+      C[16*ldc] = mad( beta, C[16*ldc], alpha * rC[2][2*i] );
+    if(offset_y+17<N)
+      C[17*ldc] = mad( beta, C[17*ldc], alpha * rC[3][2*i] );
+    if(offset_y+32<N)
+      C[32*ldc] = mad( beta, C[32*ldc], alpha * rC[4][2*i] );
+    if(offset_y+33<N)
+      C[33*ldc] = mad( beta, C[33*ldc], alpha * rC[5][2*i] );
+
+    if(offset_x+1>=M )
+      return;
+
+    C[1] = mad( beta, C[1]     , alpha * rC[0][2*i+1] );
+
+    if(offset_y+1<N)
+      C[1+ldc]    = mad( beta, C[1+ldc]   , alpha * rC[1][2*i+1] );
+    if(offset_y+16<N)
+      C[1+16*ldc] = mad( beta, C[1+16*ldc], alpha * rC[2][2*i+1] );
+    if(offset_y+17<N)
+      C[1+17*ldc] = mad( beta, C[1+17*ldc], alpha * rC[3][2*i+1] );
+    if(offset_y+32<N)
+      C[1+32*ldc] = mad( beta, C[1+32*ldc], alpha * rC[4][2*i+1] );
+    if(offset_y+33<N)
+      C[1+33*ldc] = mad( beta, C[1+33*ldc], alpha * rC[5][2*i+1] );
+
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+  }
+}
+";
+
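+// Same MAIN/ROW/COLUMN/SINGLE split as above, specialised for beta == 0:
+// the epilogue overwrites C with alpha * A * B^T instead of accumulating with mad().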
+static const char * dgemm_NT_8_SPLIT__ALPHA = "
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
+
+
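+// One K-step of the 6x6 register tile: each work-item reads 6 values of A and 6 of B
+// from local memory, issues the 36 mad() updates into rC, advances offA/offB by 48
+// (one row of the 48-wide LDS tile), then synchronises the work-group.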
+#define  M6x6 \
+            rA[0] = lA[offA +  0];                        \
+            rA[1] = lA[offA +  1];                        \
+            rA[2] = lA[offA + 16];                        \
+            rA[3] = lA[offA + 17];                        \
+            rA[4] = lA[offA + 32];                        \
+            rA[5] = lA[offA + 33];                        \
+            rB[0] = lB[offB +  0];                        \
+            rB[1] = lB[offB +  1];                        \
+            rB[2] = lB[offB + 16];                        \
+            rB[3] = lB[offB + 17];                        \
+            rB[4] = lB[offB + 32];                        \
+            rB[5] = lB[offB + 33];                        \
+            offA += 48;                                   \
+            offB += 48;                                   \
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);         \
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);         \
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);         \
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);         \
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);         \
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);         \
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);         \
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);         \
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);         \
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);         \
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);         \
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);         \
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);         \
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);         \
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);         \
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);         \
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);         \
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);         \
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);         \
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);         \
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);         \
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);         \
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);         \
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);         \
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);         \
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);         \
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);         \
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);         \
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);         \
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);         \
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);         \
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);         \
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);         \
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);         \
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);         \
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);         \
+            barrier(CLK_LOCAL_MEM_FENCE);
+
+
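+// Main-body kernel: the work-group's 48x48 tile lies fully inside C (no M/N bounds
+// checks) and K is consumed in full blocks of 8 (block_k = K >> 3).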
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_48_8_8x8_6x6__ALPHA_SPLIT_MAIN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+        __local double2* plA = (__local double2*)(lA + idy*48 + 2*idx);
+        __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+        plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+    (C[(offset_x +  0) + (offset_y +  0) * ldc] = alpha * rC[0][0]);
+    (C[(offset_x +  1) + (offset_y +  0) * ldc] = alpha * rC[0][1]);
+    (C[(offset_x +  0) + (offset_y +  1) * ldc] = alpha * rC[1][0]);
+    (C[(offset_x +  1) + (offset_y +  1) * ldc] = alpha * rC[1][1]);
+    (C[(offset_x +  0) + (offset_y + 16) * ldc] = alpha * rC[2][0]);
+    (C[(offset_x +  1) + (offset_y + 16) * ldc] = alpha * rC[2][1]);
+    (C[(offset_x +  0) + (offset_y + 17) * ldc] = alpha * rC[3][0]);
+    (C[(offset_x +  1) + (offset_y + 17) * ldc] = alpha * rC[3][1]);
+    (C[(offset_x +  0) + (offset_y + 32) * ldc] = alpha * rC[4][0]);
+    (C[(offset_x +  1) + (offset_y + 32) * ldc] = alpha * rC[4][1]);
+    (C[(offset_x +  0) + (offset_y + 33) * ldc] = alpha * rC[5][0]);
+    (C[(offset_x +  1) + (offset_y + 33) * ldc] = alpha * rC[5][1]);
+    (C[(offset_x + 16) + (offset_y +  0) * ldc] = alpha * rC[0][2]);
+    (C[(offset_x + 17) + (offset_y +  0) * ldc] = alpha * rC[0][3]);
+    (C[(offset_x + 16) + (offset_y +  1) * ldc] = alpha * rC[1][2]);
+    (C[(offset_x + 17) + (offset_y +  1) * ldc] = alpha * rC[1][3]);
+    (C[(offset_x + 16) + (offset_y + 16) * ldc] = alpha * rC[2][2]);
+    (C[(offset_x + 17) + (offset_y + 16) * ldc] = alpha * rC[2][3]);
+    (C[(offset_x + 16) + (offset_y + 17) * ldc] = alpha * rC[3][2]);
+    (C[(offset_x + 17) + (offset_y + 17) * ldc] = alpha * rC[3][3]);
+    (C[(offset_x + 16) + (offset_y + 32) * ldc] = alpha * rC[4][2]);
+    (C[(offset_x + 17) + (offset_y + 32) * ldc] = alpha * rC[4][3]);
+    (C[(offset_x + 16) + (offset_y + 33) * ldc] = alpha * rC[5][2]);
+    (C[(offset_x + 17) + (offset_y + 33) * ldc] = alpha * rC[5][3]);
+    (C[(offset_x + 32) + (offset_y +  0) * ldc] = alpha * rC[0][4]);
+    (C[(offset_x + 33) + (offset_y +  0) * ldc] = alpha * rC[0][5]);
+    (C[(offset_x + 32) + (offset_y +  1) * ldc] = alpha * rC[1][4]);
+    (C[(offset_x + 33) + (offset_y +  1) * ldc] = alpha * rC[1][5]);
+    (C[(offset_x + 32) + (offset_y + 16) * ldc] = alpha * rC[2][4]);
+    (C[(offset_x + 33) + (offset_y + 16) * ldc] = alpha * rC[2][5]);
+    (C[(offset_x + 32) + (offset_y + 17) * ldc] = alpha * rC[3][4]);
+    (C[(offset_x + 33) + (offset_y + 17) * ldc] = alpha * rC[3][5]);
+    (C[(offset_x + 32) + (offset_y + 32) * ldc] = alpha * rC[4][4]);
+    (C[(offset_x + 33) + (offset_y + 32) * ldc] = alpha * rC[4][5]);
+    (C[(offset_x + 32) + (offset_y + 33) * ldc] = alpha * rC[5][4]);
+    (C[(offset_x + 33) + (offset_y + 33) * ldc] = alpha * rC[5][5]);
+}
+
+
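+// Row-tail kernel (alpha-only): gidx is pinned to the last, partial block of rows
+// (M not a multiple of 48); A loads are predicated against M and the store loop
+// stops once offset_x reaches M.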
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_48_8_8x8_6x6__ALPHA_SPLIT_ROW(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+    int CurrentOffSetA = 2*(gidx*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+        __local double* plA = (lA + idy*48 + 2*idx);
+        __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+
+        plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+        plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+        plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = alpha * rC[0][2*i] ;
+    C[ldc]    = alpha * rC[1][2*i] ;
+    C[16*ldc] = alpha * rC[2][2*i] ;
+    C[17*ldc] = alpha * rC[3][2*i] ;
+    C[32*ldc] = alpha * rC[4][2*i] ;
+    C[33*ldc] = alpha * rC[5][2*i] ;
+
+    if(offset_x+1>=M )
+      return;
+
+    C[1]        =  alpha * rC[0][2*i+1] ;
+    C[1+ldc]    =  alpha * rC[1][2*i+1] ;
+    C[1+16*ldc] =  alpha * rC[2][2*i+1] ;
+    C[1+17*ldc] =  alpha * rC[3][2*i+1] ;
+    C[1+32*ldc] =  alpha * rC[4][2*i+1] ;
+    C[1+33*ldc] =  alpha * rC[5][2*i+1] ;
+
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+  }
+}
+
+
+
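+// Column-tail kernel (alpha-only): gidy is pinned to the last, partial block of
+// columns; B loads and the per-column C stores are predicated against N.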
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_1_8_8x8_6x6__ALPHA_SPLIT_COLUMN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+    int CurrentOffSetB = 2*(gidy*24 + idx);
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+        __local double2* plA = (__local double2*) (lA + idy*48 + 2*idx);
+        __local double* plB = (lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+        plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+        plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16 ];
+        plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+        plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+        plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+
+        plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if( offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]   = alpha * rC[0][2*i] ;
+    C[1]   = alpha * rC[0][2*i+1] ;
+
+    if(offset_y+1<N)
+    {
+      C[ldc]   =  alpha * rC[1][2*i] ;
+      C[1+ldc] =  alpha * rC[1][2*i+1] ;
+    }
+
+    if(offset_y+16<N)
+    {
+      C[16*ldc]   =  alpha * rC[2][2*i] ;
+      C[1+16*ldc] =  alpha * rC[2][2*i+1] ;
+    }
+
+    if(offset_y+17<N)
+    {
+      C[17*ldc]   =  alpha * rC[3][2*i] ;
+      C[1+17*ldc] =  alpha * rC[3][2*i+1] ;
+    }
+
+    if(offset_y+32<N)
+    {
+      C[32*ldc]   =  alpha * rC[4][2*i] ;
+      C[1+32*ldc] =  alpha * rC[4][2*i+1] ;
+    }
+
+    if(offset_y+33<N)
+    {
+      C[33*ldc]   =  alpha * rC[5][2*i] ;
+      C[1+33*ldc] =  alpha * rC[5][2*i+1] ;
+    }
+
+    C+=16;
+  }
+}
+
+
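+// Corner-tail kernel (alpha-only): both the M and the N tail, so A and B loads and
+// all C stores are bounds-checked.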
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_8_8x8_6x6__ALPHA_SPLIT_SINGLE(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+    int CurrentOffSetA = 2*(gidx*24 + idx);
+    int CurrentOffSetB = 2*(gidy*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+        __local double* plA = (lA + idy*48 + 2*idx);
+        __local double* plB = (lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        //plB[0 ] = uB.d2v[0 ];
+        //plB[8 ] = uB.d2v[8 ];
+        //plB[16] = uB.d2v[16];
+        //plA[0 ] = uA.d2v[0 ];
+        //plA[8 ] = uA.d2v[8 ];
+        //plA[16] = uA.d2v[16];
+
+		
+        plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+        plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+        plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16 ];
+        plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+        plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+        plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+        
+        plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+        plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+        plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = alpha * rC[0][2*i] ;
+
+    if(offset_y+1<N)
+      C[ldc]    =  alpha * rC[1][2*i] ;
+    if(offset_y+16<N)
+      C[16*ldc] =  alpha * rC[2][2*i] ;
+    if(offset_y+17<N)
+      C[17*ldc] =  alpha * rC[3][2*i] ;
+    if(offset_y+32<N)
+      C[32*ldc] =  alpha * rC[4][2*i] ;
+    if(offset_y+33<N)
+      C[33*ldc] =  alpha * rC[5][2*i] ;
+
+    if(offset_x+1>=M )
+      return;
+
+    C[1]        =  alpha * rC[0][2*i+1] ;
+    if(offset_y+1<N)
+      C[1+ldc]    =  alpha * rC[1][2*i+1] ;
+    if(offset_y+16<N)
+      C[1+16*ldc] =  alpha * rC[2][2*i+1] ;
+    if(offset_y+17<N)
+      C[1+17*ldc] =  alpha * rC[3][2*i+1] ;
+    if(offset_y+32<N)
+      C[1+32*ldc] =  alpha * rC[4][2*i+1] ;
+    if(offset_y+33<N)
+      C[1+33*ldc] =  alpha * rC[5][2*i+1] ;
+
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+  }
+}
+";
+
+
+
+
+
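+// "_1" variants of the same split: K is not assumed to be a multiple of 8. The inner
+// loop runs min(8, K - block_k) iterations per 8-deep LDS tile instead of using the
+// fully unrolled M6x6 macro.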
+static const char * dgemm_NT_1_SPLIT__ALPHABETA = "
+
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
+
+
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_48_1_8x8_6x6__ALPHABETA_SPLIT_MAIN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double2* plA = (__local double2*)(lA + idy*48 + 2*idx);
+        __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+        plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        #pragma unroll 1
+        for(uint k = 0 ; k < min(8u, K-block_k ) ; k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+    (C[(offset_x +  0) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y +  0) * ldc], alpha * rC[0][0]));
+    (C[(offset_x +  1) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y +  0) * ldc], alpha * rC[0][1]));
+    (C[(offset_x +  0) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y +  1) * ldc], alpha * rC[1][0]));
+    (C[(offset_x +  1) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y +  1) * ldc], alpha * rC[1][1]));
+    (C[(offset_x +  0) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 16) * ldc], alpha * rC[2][0]));
+    (C[(offset_x +  1) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 16) * ldc], alpha * rC[2][1]));
+    (C[(offset_x +  0) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 17) * ldc], alpha * rC[3][0]));
+    (C[(offset_x +  1) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 17) * ldc], alpha * rC[3][1]));
+    (C[(offset_x +  0) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 32) * ldc], alpha * rC[4][0]));
+    (C[(offset_x +  1) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 32) * ldc], alpha * rC[4][1]));
+    (C[(offset_x +  0) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x +  0) + (offset_y + 33) * ldc], alpha * rC[5][0]));
+    (C[(offset_x +  1) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x +  1) + (offset_y + 33) * ldc], alpha * rC[5][1]));
+    (C[(offset_x + 16) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y +  0) * ldc], alpha * rC[0][2]));
+    (C[(offset_x + 17) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y +  0) * ldc], alpha * rC[0][3]));
+    (C[(offset_x + 16) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y +  1) * ldc], alpha * rC[1][2]));
+    (C[(offset_x + 17) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y +  1) * ldc], alpha * rC[1][3]));
+    (C[(offset_x + 16) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 16) * ldc], alpha * rC[2][2]));
+    (C[(offset_x + 17) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 16) * ldc], alpha * rC[2][3]));
+    (C[(offset_x + 16) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 17) * ldc], alpha * rC[3][2]));
+    (C[(offset_x + 17) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 17) * ldc], alpha * rC[3][3]));
+    (C[(offset_x + 16) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 32) * ldc], alpha * rC[4][2]));
+    (C[(offset_x + 17) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 32) * ldc], alpha * rC[4][3]));
+    (C[(offset_x + 16) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 16) + (offset_y + 33) * ldc], alpha * rC[5][2]));
+    (C[(offset_x + 17) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 17) + (offset_y + 33) * ldc], alpha * rC[5][3]));
+    (C[(offset_x + 32) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y +  0) * ldc], alpha * rC[0][4]));
+    (C[(offset_x + 33) + (offset_y +  0) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y +  0) * ldc], alpha * rC[0][5]));
+    (C[(offset_x + 32) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y +  1) * ldc], alpha * rC[1][4]));
+    (C[(offset_x + 33) + (offset_y +  1) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y +  1) * ldc], alpha * rC[1][5]));
+    (C[(offset_x + 32) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 16) * ldc], alpha * rC[2][4]));
+    (C[(offset_x + 33) + (offset_y + 16) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 16) * ldc], alpha * rC[2][5]));
+    (C[(offset_x + 32) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 17) * ldc], alpha * rC[3][4]));
+    (C[(offset_x + 33) + (offset_y + 17) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 17) * ldc], alpha * rC[3][5]));
+    (C[(offset_x + 32) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 32) * ldc], alpha * rC[4][4]));
+    (C[(offset_x + 33) + (offset_y + 32) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 32) * ldc], alpha * rC[4][5]));
+    (C[(offset_x + 32) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 32) + (offset_y + 33) * ldc], alpha * rC[5][4]));
+    (C[(offset_x + 33) + (offset_y + 33) * ldc] = mad(beta, C[(offset_x + 33) + (offset_y + 33) * ldc], alpha * rC[5][5]));
+}
+
+
+
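+// Row-tail kernel with K-tail handling: A loads are predicated against M and the
+// store loop stops once offset_x reaches M.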
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_48_1_8x8_6x6__ALPHABETA_SPLIT_ROW(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+    int CurrentOffSetA = 2*(gidx*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double* plA =(lA + idy*48 + 2*idx);
+        __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+
+        plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+        plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+        plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+        #pragma unroll 1
+        for(uint k = 0 ; k < min(8u, K-block_k ); k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = mad( beta, C[0]     , alpha * rC[0][2*i] );
+    C[ldc]    = mad( beta, C[ldc]   , alpha * rC[1][2*i] );
+    C[16*ldc] = mad( beta, C[16*ldc], alpha * rC[2][2*i] );
+    C[17*ldc] = mad( beta, C[17*ldc], alpha * rC[3][2*i] );
+    C[32*ldc] = mad( beta, C[32*ldc], alpha * rC[4][2*i] );
+    C[33*ldc] = mad( beta, C[33*ldc], alpha * rC[5][2*i] );
+
+    if(offset_x+1>=M )
+      return;
+
+    C[1]        = mad( beta, C[1]     , alpha * rC[0][2*i+1] );
+    C[1+ldc]    = mad( beta, C[1+ldc]   , alpha * rC[1][2*i+1] );
+    C[1+16*ldc] = mad( beta, C[1+16*ldc], alpha * rC[2][2*i+1] );
+    C[1+17*ldc] = mad( beta, C[1+17*ldc], alpha * rC[3][2*i+1] );
+    C[1+32*ldc] = mad( beta, C[1+32*ldc], alpha * rC[4][2*i+1] );
+    C[1+33*ldc] = mad( beta, C[1+33*ldc], alpha * rC[5][2*i+1] );
+
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+  }
+}
+
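+// Column-tail kernel with K-tail handling: B loads and the per-column C stores are
+// predicated against N.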
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_1_1_8x8_6x6__ALPHABETA_SPLIT_COLUMN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+    int CurrentOffSetB = 2*(gidy*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double2* plA = (__local double2*)(lA + idy*48 + 2*idx);
+        __local double* plB = (lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+        plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+        plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16 ];
+        plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+        plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+        plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+
+        plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+                #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if( offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]   = mad( beta, C[0], alpha * rC[0][2*i] );
+    C[1]   = mad( beta, C[1], alpha * rC[0][2*i+1] );
+
+    if(offset_y+1<N)
+    {
+      C[ldc]   = mad( beta, C[ldc], alpha * rC[1][2*i] );
+      C[1+ldc] = mad( beta, C[1+ldc], alpha * rC[1][2*i+1] );
+    }
+
+    if(offset_y+16<N)
+    {
+      C[16*ldc]   = mad( beta, C[16*ldc], alpha * rC[2][2*i] );
+      C[1+16*ldc] = mad( beta, C[1+16*ldc], alpha * rC[2][2*i+1] );
+    }
+
+    if(offset_y+17<N)
+    {
+      C[17*ldc]   = mad( beta, C[17*ldc], alpha * rC[3][2*i] );
+      C[1+17*ldc] = mad( beta, C[1+17*ldc], alpha * rC[3][2*i+1] );
+    }
+
+    if(offset_y+32<N)
+    {
+      C[32*ldc]   = mad( beta, C[32*ldc], alpha * rC[4][2*i] );
+      C[1+32*ldc] = mad( beta, C[1+32*ldc], alpha * rC[4][2*i+1] );
+    }
+
+    if(offset_y+33<N)
+    {
+      C[33*ldc]   = mad( beta, C[33*ldc], alpha * rC[5][2*i] );
+      C[1+33*ldc] = mad( beta, C[1+33*ldc], alpha * rC[5][2*i+1] );
+    }
+
+    C+=16;
+  }
+}
+
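+// Corner-tail kernel with K-tail handling: A and B loads and all C stores are
+// predicated against both M and N.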
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_1_8x8_6x6__ALPHABETA_SPLIT_SINGLE(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+    int CurrentOffSetA = 2*(gidx*24 + idx);
+    int CurrentOffSetB = 2*(gidy*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double* plA = (lA + idy*48 + 2*idx);
+        __local double* plB = (lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        
+        plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+        plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+        plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16 ];
+        plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+        plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+        plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+        
+        plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+        plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+        plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+                #pragma unroll 1
+        for(uint k = 0 ; k < min(8u, K-block_k ); k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = mad( beta, C[0]     , alpha * rC[0][2*i] );
+
+    if(offset_y+1<N)
+      C[ldc]    = mad( beta, C[ldc]   , alpha * rC[1][2*i] );
+    if(offset_y+16<N)
+      C[16*ldc] = mad( beta, C[16*ldc], alpha * rC[2][2*i] );
+    if(offset_y+17<N)
+      C[17*ldc] = mad( beta, C[17*ldc], alpha * rC[3][2*i] );
+    if(offset_y+32<N)
+      C[32*ldc] = mad( beta, C[32*ldc], alpha * rC[4][2*i] );
+    if(offset_y+33<N)
+      C[33*ldc] = mad( beta, C[33*ldc], alpha * rC[5][2*i] );
+
+    if(offset_x+1>=M )
+      return;
+
+    C[1] = mad( beta, C[1]     , alpha * rC[0][2*i+1] );
+
+    if(offset_y+1<N)
+      C[1+ldc]    = mad( beta, C[1+ldc]   , alpha * rC[1][2*i+1] );
+    if(offset_y+16<N)
+      C[1+16*ldc] = mad( beta, C[1+16*ldc], alpha * rC[2][2*i+1] );
+    if(offset_y+17<N)
+      C[1+17*ldc] = mad( beta, C[1+17*ldc], alpha * rC[3][2*i+1] );
+    if(offset_y+32<N)
+      C[1+32*ldc] = mad( beta, C[1+32*ldc], alpha * rC[4][2*i+1] );
+    if(offset_y+33<N)
+      C[1+33*ldc] = mad( beta, C[1+33*ldc], alpha * rC[5][2*i+1] );
+
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+  }
+}
+";
+
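+// Alpha-only (beta == 0) counterpart of the K-tail ("_1") family above.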
+static const char * dgemm_NT_1_SPLIT__ALPHA = "
+typedef union GPtr {
+  __global float *f;
+  __global double *d;
+  __global float2 *f2v;
+  __global double2 *d2v;
+} GPtr;
+
+
+#define  M6x6 \
+            rA[0] = lA[offA +  0];                        \
+            rA[1] = lA[offA +  1];                        \
+            rA[2] = lA[offA + 16];                        \
+            rA[3] = lA[offA + 17];                        \
+            rA[4] = lA[offA + 32];                        \
+            rA[5] = lA[offA + 33];                        \
+            rB[0] = lB[offB +  0];                        \
+            rB[1] = lB[offB +  1];                        \
+            rB[2] = lB[offB + 16];                        \
+            rB[3] = lB[offB + 17];                        \
+            rB[4] = lB[offB + 32];                        \
+            rB[5] = lB[offB + 33];                        \
+            offA += 48;                                   \
+            offB += 48;                                   \
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);         \
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);         \
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);         \
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);         \
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);         \
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);         \
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);         \
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);         \
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);         \
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);         \
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);         \
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);         \
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);         \
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);         \
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);         \
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);         \
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);         \
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);         \
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);         \
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);         \
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);         \
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);         \
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);         \
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);         \
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);         \
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);         \
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);         \
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);         \
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);         \
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);         \
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);         \
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);         \
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);         \
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);         \
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);         \
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);         \
+            barrier(CLK_LOCAL_MEM_FENCE);
+
+
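+// Main-body kernel with K-tail handling: full 48x48 tiles of C (no M/N bounds
+// checks), with the inner loop clamped to the remaining depth min(8, K - block_k).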
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_48_1_8x8_6x6__ALPHA_SPLIT_MAIN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double2* plA = (__local double2*)(lA + idy*48 + 2*idx);
+        __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+        plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+                #pragma unroll 1
+        for(uint k = 0 ; k < min(8u, K-block_k ); k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+    (C[(offset_x +  0) + (offset_y +  0) * ldc] = alpha * rC[0][0]);
+    (C[(offset_x +  1) + (offset_y +  0) * ldc] = alpha * rC[0][1]);
+    (C[(offset_x +  0) + (offset_y +  1) * ldc] = alpha * rC[1][0]);
+    (C[(offset_x +  1) + (offset_y +  1) * ldc] = alpha * rC[1][1]);
+    (C[(offset_x +  0) + (offset_y + 16) * ldc] = alpha * rC[2][0]);
+    (C[(offset_x +  1) + (offset_y + 16) * ldc] = alpha * rC[2][1]);
+    (C[(offset_x +  0) + (offset_y + 17) * ldc] = alpha * rC[3][0]);
+    (C[(offset_x +  1) + (offset_y + 17) * ldc] = alpha * rC[3][1]);
+    (C[(offset_x +  0) + (offset_y + 32) * ldc] = alpha * rC[4][0]);
+    (C[(offset_x +  1) + (offset_y + 32) * ldc] = alpha * rC[4][1]);
+    (C[(offset_x +  0) + (offset_y + 33) * ldc] = alpha * rC[5][0]);
+    (C[(offset_x +  1) + (offset_y + 33) * ldc] = alpha * rC[5][1]);
+    (C[(offset_x + 16) + (offset_y +  0) * ldc] = alpha * rC[0][2]);
+    (C[(offset_x + 17) + (offset_y +  0) * ldc] = alpha * rC[0][3]);
+    (C[(offset_x + 16) + (offset_y +  1) * ldc] = alpha * rC[1][2]);
+    (C[(offset_x + 17) + (offset_y +  1) * ldc] = alpha * rC[1][3]);
+    (C[(offset_x + 16) + (offset_y + 16) * ldc] = alpha * rC[2][2]);
+    (C[(offset_x + 17) + (offset_y + 16) * ldc] = alpha * rC[2][3]);
+    (C[(offset_x + 16) + (offset_y + 17) * ldc] = alpha * rC[3][2]);
+    (C[(offset_x + 17) + (offset_y + 17) * ldc] = alpha * rC[3][3]);
+    (C[(offset_x + 16) + (offset_y + 32) * ldc] = alpha * rC[4][2]);
+    (C[(offset_x + 17) + (offset_y + 32) * ldc] = alpha * rC[4][3]);
+    (C[(offset_x + 16) + (offset_y + 33) * ldc] = alpha * rC[5][2]);
+    (C[(offset_x + 17) + (offset_y + 33) * ldc] = alpha * rC[5][3]);
+    (C[(offset_x + 32) + (offset_y +  0) * ldc] = alpha * rC[0][4]);
+    (C[(offset_x + 33) + (offset_y +  0) * ldc] = alpha * rC[0][5]);
+    (C[(offset_x + 32) + (offset_y +  1) * ldc] = alpha * rC[1][4]);
+    (C[(offset_x + 33) + (offset_y +  1) * ldc] = alpha * rC[1][5]);
+    (C[(offset_x + 32) + (offset_y + 16) * ldc] = alpha * rC[2][4]);
+    (C[(offset_x + 33) + (offset_y + 16) * ldc] = alpha * rC[2][5]);
+    (C[(offset_x + 32) + (offset_y + 17) * ldc] = alpha * rC[3][4]);
+    (C[(offset_x + 33) + (offset_y + 17) * ldc] = alpha * rC[3][5]);
+    (C[(offset_x + 32) + (offset_y + 32) * ldc] = alpha * rC[4][4]);
+    (C[(offset_x + 33) + (offset_y + 32) * ldc] = alpha * rC[4][5]);
+    (C[(offset_x + 32) + (offset_y + 33) * ldc] = alpha * rC[5][4]);
+    (C[(offset_x + 33) + (offset_y + 33) * ldc] = alpha * rC[5][5]);
+}
+
+
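+// Row-remainder variant: the work-group is pinned to the last 48-row block of C
+// (gidx = M/48). Loads of A are zero-filled past M and the store loop returns as
+// soon as offset_x reaches M, so M need not be a multiple of 48.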
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_48_1_8x8_6x6__ALPHA_SPLIT_ROW(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+	int CurrentOffSetA = 2*(gidx*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double* plA = (lA + idy*48 + 2*idx);
+        __local double2* plB = (__local double2*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = uB.d2v[0 ];
+        plB[8 ] = uB.d2v[8 ];
+        plB[16] = uB.d2v[16];
+
+        plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+        plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+        plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+                #pragma unroll 1
+        for(uint k = 0 ; k < min(8u, K-block_k ); k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = alpha * rC[0][2*i] ;
+    C[ldc]    = alpha * rC[1][2*i] ;
+    C[16*ldc] = alpha * rC[2][2*i] ;
+    C[17*ldc] = alpha * rC[3][2*i] ;
+    C[32*ldc] = alpha * rC[4][2*i] ;
+    C[33*ldc] = alpha * rC[5][2*i] ;
+
+	if(offset_x+1>=M )
+	  return;
+	
+	C[1]        =  alpha * rC[0][2*i+1] ;
+    C[1+ldc]    =  alpha * rC[1][2*i+1] ;
+    C[1+16*ldc] =  alpha * rC[2][2*i+1] ;
+    C[1+17*ldc] =  alpha * rC[3][2*i+1] ;
+    C[1+32*ldc] =  alpha * rC[4][2*i+1] ;
+    C[1+33*ldc] =  alpha * rC[5][2*i+1] ;
+
+	C+=16;
+	offset_x+=16;
+	if(offset_x>=M )
+       return;
+
+  }
+}
+
+
+
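+// Column-remainder variant: the work-group is pinned to the last 48-column block
+// of C (gidy = N/48). Loads of B are zero-filled past N and stores to C are
+// guarded column-by-column against N.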
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_48_1_1_8x8_6x6__ALPHA_SPLIT_COLUMN(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+	int CurrentOffSetB = 2*(gidy*24 + idx);
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double2* plA = (__local double2*)(lA + idy*48 + 2*idx);
+        __local double* plB = (__local double*)(lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+        plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+        plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16];

+        plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+        plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+        plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+		
+		plA[0 ] = uA.d2v[0 ];
+        plA[8 ] = uA.d2v[8 ];
+        plA[16] = uA.d2v[16];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+       
+	    #pragma unroll 1
+        for(uint k = 0 ; k < min(8u, K-block_k ); k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if( offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]   = alpha * rC[0][2*i] ;
+	C[1]   = alpha * rC[0][2*i+1] ;
+
+	if(offset_y+1<N)
+	{
+      C[ldc]   =  alpha * rC[1][2*i] ;
+	  C[1+ldc] =  alpha * rC[1][2*i+1] ;
+	}
+
+	if(offset_y+16<N)
+	{
+      C[16*ldc]   =  alpha * rC[2][2*i] ;
+	  C[1+16*ldc] =  alpha * rC[2][2*i+1] ;
+    }
+
+	if(offset_y+17<N)
+	{
+      C[17*ldc]   =  alpha * rC[3][2*i] ;
+	  C[1+17*ldc] =  alpha * rC[3][2*i+1] ;
+    }
+
+	if(offset_y+32<N)
+	{
+	  C[32*ldc]   =  alpha * rC[4][2*i] ;
+	  C[1+32*ldc] =  alpha * rC[4][2*i+1] ;
+	}
+
+	if(offset_y+33<N)
+	{
+      C[33*ldc]   =  alpha * rC[5][2*i] ;
+	  C[1+33*ldc] =  alpha * rC[5][2*i+1] ;
+    }
+
+	C+=16;
+	
+  }
+}
+
+
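+// Corner variant covering the bottom-right tile where both M and N have a
+// remainder (gidx = M/48, gidy = N/48): A and B loads are zero-filled past M/N
+// and every store to C is guarded against both bounds.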
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NT_1_1_1_8x8_6x6__ALPHA_SPLIT_SINGLE(__global double2 const * restrict A,
+                                       __global double2 const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    GPtr uA, uB;
+    uA.d2v = (__global double2 *)A;
+    uB.d2v = (__global double2 *)B;
+ //   C += offsetC;
+
+
+    uA.d += offsetA;
+    uB.d += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+	int CurrentOffSetA = 2*(gidx*24 + idx);
+	int CurrentOffSetB = 2*(gidy*24 + idx);
+
+    uA.d += 2*(gidx*24 + idx) + idy*lda;
+    uB.d += 2*(gidy*24 + idx) + idy*ldb;
+
+    int block_k = 0;//K >> 3;
+    do {
+        __local double* plA = (lA + idy*48 + 2*idx);
+        __local double* plB = (lB + idy*48 + 2*idx);
+//        barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0 ] = CurrentOffSetB>=N?0.0:uB.d[0 ];
+        plB[1 ] = CurrentOffSetB+1>=N?0.0:uB.d[1 ];
+        plB[16] = CurrentOffSetB+16>=N?0.0:uB.d[16];
+        plB[17] = CurrentOffSetB+17>=N?0.0:uB.d[17];
+        plB[32] = CurrentOffSetB+32>=N?0.0:uB.d[32];
+        plB[33] = CurrentOffSetB+33>=N?0.0:uB.d[33];
+        
+        plA[0]  =  CurrentOffSetA>=M?0.0:uA.d[0];
+        plA[1]  =  CurrentOffSetA+1>=M?0.0:uA.d[1];
+        plA[16] =  CurrentOffSetA+16>=M?0.0:uA.d[16];
+        plA[17] =  CurrentOffSetA+17>=M?0.0:uA.d[17];
+        plA[32] =  CurrentOffSetA+32>=M?0.0:uA.d[32];
+        plA[33] =  CurrentOffSetA+33>=M?0.0:uA.d[33];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx << 1;
+        int offB = idy << 1;
+             
+        #pragma unroll 1
+        for(uint k = 0 ; k < min(8u, K-block_k ); k+=1)
+        {
+            rA[0] = lA[offA +  0];
+            rA[1] = lA[offA +  1];
+            rA[2] = lA[offA + 16];
+            rA[3] = lA[offA + 17];
+            rA[4] = lA[offA + 32];
+            rA[5] = lA[offA + 33];
+            rB[0] = lB[offB +  0];
+            rB[1] = lB[offB +  1];
+            rB[2] = lB[offB + 16];
+            rB[3] = lB[offB + 17];
+            rB[4] = lB[offB + 32];
+            rB[5] = lB[offB + 33];
+            offA += 48;
+            offB += 48;
+            rC[0][0] = mad(rA[0],rB[0],rC[0][0]);
+            rC[0][1] = mad(rA[1],rB[0],rC[0][1]);
+            rC[0][2] = mad(rA[2],rB[0],rC[0][2]);
+            rC[0][3] = mad(rA[3],rB[0],rC[0][3]);
+            rC[0][4] = mad(rA[4],rB[0],rC[0][4]);
+            rC[0][5] = mad(rA[5],rB[0],rC[0][5]);
+            rC[1][0] = mad(rA[0],rB[1],rC[1][0]);
+            rC[1][1] = mad(rA[1],rB[1],rC[1][1]);
+            rC[1][2] = mad(rA[2],rB[1],rC[1][2]);
+            rC[1][3] = mad(rA[3],rB[1],rC[1][3]);
+            rC[1][4] = mad(rA[4],rB[1],rC[1][4]);
+            rC[1][5] = mad(rA[5],rB[1],rC[1][5]);
+            rC[2][0] = mad(rA[0],rB[2],rC[2][0]);
+            rC[2][1] = mad(rA[1],rB[2],rC[2][1]);
+            rC[2][2] = mad(rA[2],rB[2],rC[2][2]);
+            rC[2][3] = mad(rA[3],rB[2],rC[2][3]);
+            rC[2][4] = mad(rA[4],rB[2],rC[2][4]);
+            rC[2][5] = mad(rA[5],rB[2],rC[2][5]);
+            rC[3][0] = mad(rA[0],rB[3],rC[3][0]);
+            rC[3][1] = mad(rA[1],rB[3],rC[3][1]);
+            rC[3][2] = mad(rA[2],rB[3],rC[3][2]);
+            rC[3][3] = mad(rA[3],rB[3],rC[3][3]);
+            rC[3][4] = mad(rA[4],rB[3],rC[3][4]);
+            rC[3][5] = mad(rA[5],rB[3],rC[3][5]);
+            rC[4][0] = mad(rA[0],rB[4],rC[4][0]);
+            rC[4][1] = mad(rA[1],rB[4],rC[4][1]);
+            rC[4][2] = mad(rA[2],rB[4],rC[4][2]);
+            rC[4][3] = mad(rA[3],rB[4],rC[4][3]);
+            rC[4][4] = mad(rA[4],rB[4],rC[4][4]);
+            rC[4][5] = mad(rA[5],rB[4],rC[4][5]);
+            rC[5][0] = mad(rA[0],rB[5],rC[5][0]);
+            rC[5][1] = mad(rA[1],rB[5],rC[5][1]);
+            rC[5][2] = mad(rA[2],rB[5],rC[5][2]);
+            rC[5][3] = mad(rA[3],rB[5],rC[5][3]);
+            rC[5][4] = mad(rA[4],rB[5],rC[5][4]);
+            rC[5][5] = mad(rA[5],rB[5],rC[5][5]);
+			barrier(CLK_LOCAL_MEM_FENCE);
+        }
+        uA.d += lda << 3;
+        uB.d += ldb << 3;
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx*2;
+    int offset_y = gidy*48+ idy*2;
+	
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+
+  for (int i = 0; i<3; i++)
+  {
+    C[0]      = alpha * rC[0][2*i] ;
+	
+	if(offset_y+1<N)
+      C[ldc]    =  alpha * rC[1][2*i] ;
+    if(offset_y+16<N)
+      C[16*ldc] =  alpha * rC[2][2*i] ;
+	if(offset_y+17<N)
+      C[17*ldc] =  alpha * rC[3][2*i] ;
+	if(offset_y+32<N)
+      C[32*ldc] =  alpha * rC[4][2*i] ;
+	if(offset_y+33<N)
+      C[33*ldc] =  alpha * rC[5][2*i] ;
+
+	if(offset_x+1>=M )
+	  return;
+	
+	C[1]        =  alpha * rC[0][2*i+1] ;
+	if(offset_y+1<N)
+      C[1+ldc]    =  alpha * rC[1][2*i+1] ;
+	if(offset_y+16<N)
+      C[1+16*ldc] =  alpha * rC[2][2*i+1] ;
+	if(offset_y+17<N)
+      C[1+17*ldc] =  alpha * rC[3][2*i+1] ;
+	if(offset_y+32<N)
+      C[1+32*ldc] =  alpha * rC[4][2*i+1] ;
+	if(offset_y+33<N)
+      C[1+33*ldc] =  alpha * rC[5][2*i+1] ;
+
+	C+=16;
+	offset_x+=16;
+	if(offset_x>=M )
+       return;
+
+  }
+}
+";
+
+
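+// NN (neither matrix transposed) kernels for the case where the K loop can run in
+// full 8-deep blocks: block_k = K >> 3 outer iterations, each fully unrolled as
+// eight M6x6 steps with no K tail; C is updated as alpha*A*B + beta*C.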
+static const char * dgemm_NN_8_SPLIT__ALPHABETA = "
+
+
+
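+// One k-step of the per-work-item 6x6 rank-1 update: read six values each of A
+// and B from local memory (stride 8 apart), advance offA/offB by the padded slice
+// stride of 49 (48 doubles plus one pad element), do the 36 mads into rC, then
+// synchronise the work-group.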
+#define  M6x6 \
+            rA[0] = lA[offA + 0];                       \
+            rA[1] = lA[offA + 8];                       \
+            rA[2] = lA[offA + 16];                      \
+            rA[3] = lA[offA + 24];                      \
+            rA[4] = lA[offA + 32];                      \
+            rA[5] = lA[offA + 40];                      \
+            rB[0] = lB[offB + 0];                       \
+            rB[1] = lB[offB + 8];                       \
+            rB[2] = lB[offB + 16];                      \
+            rB[3] = lB[offB + 24];                      \
+            rB[4] = lB[offB + 32];                      \
+            rB[5] = lB[offB + 40];                      \
+            offA += 49;                                 \
+            offB += 49;                                 \
+            rC[0][0]=mad(rA[0],rB[0],rC[0][0]);         \
+            rC[1][0]=mad(rA[1],rB[0],rC[1][0]);         \
+            rC[2][0]=mad(rA[2],rB[0],rC[2][0]);         \
+            rC[3][0]=mad(rA[3],rB[0],rC[3][0]);         \
+            rC[4][0]=mad(rA[4],rB[0],rC[4][0]);         \
+            rC[5][0]=mad(rA[5],rB[0],rC[5][0]);         \
+            rC[0][1]=mad(rA[0],rB[1],rC[0][1]);         \
+            rC[1][1]=mad(rA[1],rB[1],rC[1][1]);         \
+            rC[2][1]=mad(rA[2],rB[1],rC[2][1]);         \
+            rC[3][1]=mad(rA[3],rB[1],rC[3][1]);         \
+            rC[4][1]=mad(rA[4],rB[1],rC[4][1]);         \
+            rC[5][1]=mad(rA[5],rB[1],rC[5][1]);         \
+            rC[0][2]=mad(rA[0],rB[2],rC[0][2]);         \
+            rC[1][2]=mad(rA[1],rB[2],rC[1][2]);         \
+            rC[2][2]=mad(rA[2],rB[2],rC[2][2]);         \
+            rC[3][2]=mad(rA[3],rB[2],rC[3][2]);         \
+            rC[4][2]=mad(rA[4],rB[2],rC[4][2]);         \
+            rC[5][2]=mad(rA[5],rB[2],rC[5][2]);         \
+            rC[0][3]=mad(rA[0],rB[3],rC[0][3]);         \
+            rC[1][3]=mad(rA[1],rB[3],rC[1][3]);         \
+            rC[2][3]=mad(rA[2],rB[3],rC[2][3]);         \
+            rC[3][3]=mad(rA[3],rB[3],rC[3][3]);         \
+            rC[4][3]=mad(rA[4],rB[3],rC[4][3]);         \
+            rC[5][3]=mad(rA[5],rB[3],rC[5][3]);         \
+            rC[0][4]=mad(rA[0],rB[4],rC[0][4]);         \
+            rC[1][4]=mad(rA[1],rB[4],rC[1][4]);         \
+            rC[2][4]=mad(rA[2],rB[4],rC[2][4]);         \
+            rC[3][4]=mad(rA[3],rB[4],rC[3][4]);         \
+            rC[4][4]=mad(rA[4],rB[4],rC[4][4]);         \
+            rC[5][4]=mad(rA[5],rB[4],rC[5][4]);         \
+            rC[0][5]=mad(rA[0],rB[5],rC[0][5]);         \
+            rC[1][5]=mad(rA[1],rB[5],rC[1][5]);         \
+            rC[2][5]=mad(rA[2],rB[5],rC[2][5]);         \
+            rC[3][5]=mad(rA[3],rB[5],rC[3][5]);         \
+            rC[4][5]=mad(rA[4],rB[5],rC[4][5]);         \
+            rC[5][5]=mad(rA[5],rB[5],rC[5][5]);         \
+            barrier(CLK_LOCAL_MEM_FENCE);
+
+
+
+
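+// Main tile kernel: each 8x8 work-group computes a full 48x48 tile of C, with each
+// work-item accumulating a 6x6 sub-block in registers; A and B are staged through
+// local memory one 8-deep K block at a time.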
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_48_8_8x8_6x6__ALPHABETA_SPLIT_MAIN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+    int block_k = K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx;
+        int offB = idy;
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+
+
+  C+= gidx*48;
+  C+= idx;
+  C+= gidy*48*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[0][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[0][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[1][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[1][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[2][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[2][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[3][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[3][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[4][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[4][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[4][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[4][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[4][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[5][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[5][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[5][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[5][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[5][5] + beta*C[40*ldc];
+}
+
+
+
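+// Row-remainder NN variant: pinned to the last 48-row block (gidx = M/48); A loads
+// are zero-filled past M and the store loop returns once offset_x reaches M.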
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_48_8_8x8_6x6__ALPHABETA_SPLIT_ROW(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = M/48;//get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+ 
+  int CurrentOffSetA =   gidx*48 + idxT;
+   
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+    int block_k = K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+    
+       
+	    plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+		plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_x>=M )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0] + beta*C[0*ldc+i*8];
+    C[8*ldc+i*8]  = alpha*rC[i][1] + beta*C[8*ldc+i*8];
+    C[16*ldc+i*8] = alpha*rC[i][2] + beta*C[16*ldc+i*8];
+    C[24*ldc+i*8] = alpha*rC[i][3] + beta*C[24*ldc+i*8];
+    C[32*ldc+i*8] = alpha*rC[i][4] + beta*C[32*ldc+i*8];
+    C[40*ldc+i*8] = alpha*rC[i][5] + beta*C[40*ldc+i*8];
+	
+    offset_x += 8;
+    if(offset_x>=M)
+        return;
+  }while (++i < 6);
+  
+}
+
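+// Column-remainder NN variant: pinned to the last 48-column block (gidy = N/48);
+// B loads are zero-filled past N and each column store is guarded against N.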
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_1_8_8x8_6x6__ALPHABETA_SPLIT_COLUMN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = get_group_id(0);
+  int gidy = N/48;//get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  (gidy*48+idyT)*ldb + idxT;
+
+  int CurrentOffSetB =   gidy*48 + idyT;
+
+    int block_k = K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+
+		plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[0+16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[0+32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[0+40*ldb];
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[i*8]  = alpha*rC[i][0] + beta*C[i*8];
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] + beta*C[8*ldc+i*8];
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] + beta*C[16*ldc+i*8];
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3] + beta*C[24*ldc+i*8];
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4] + beta*C[32*ldc+i*8];
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5] + beta*C[40*ldc+i*8];
+
+  }while (++i < 6);
+
+
+}
+
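+// Corner NN variant for the bottom-right tile: A and B loads are zero-filled past
+// M/N and stores to C are guarded against both bounds.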
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_1_8_8x8_6x6__ALPHABETA_SPLIT_SINGLE(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+    __local double lA[392];
+    __local double lB[392];
+
+    int gidx = M/48;//get_group_id(0);
+    int gidy = N/48;//get_group_id(1);
+    int idx  = get_local_id(0);
+    int idy  = get_local_id(1);
+
+
+
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  int CurrentOffSetA =   gidx*48 + idxT;
+  int CurrentOffSetB =   gidy*48 + idyT;
+ 
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  (gidy*48+idyT)*ldb + idxT;
+
+  __local double* plA = lA + idyT*49 + idxT;
+  __local double* plB = lB + idxT*49 + idyT;
+
+
+    int block_k = K >> 3;
+    do {
+
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[40*ldb];
+
+
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+        barrier(CLK_LOCAL_MEM_FENCE);
+
+        int offA = idx ;
+        int offB = idy ;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+
+    C[0*ldc+i*8]  = alpha*rC[i][0] + beta*C[0*ldc+i*8];
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] + beta*C[8*ldc+i*8];
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] + beta*C[16*ldc+i*8];
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3] + beta*C[24*ldc+i*8];
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4] + beta*C[32*ldc+i*8];
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5] + beta*C[40*ldc+i*8];
+
+	offset_x += 8;
+    if(offset_x>=M)
+        return;
+
+
+  }while (++i < 6);
+}
+";
+
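+// The same four NN kernels without a beta parameter: C is overwritten with
+// alpha*A*B instead of being accumulated.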
+static const char * dgemm_NN_8_SPLIT__ALPHA = "
+
+
+
+#define  M6x6 \
+            rA[0] = lA[offA + 0];\
+            rA[1] = lA[offA + 8];\
+            rA[2] = lA[offA + 16];\
+            rA[3] = lA[offA + 24];\
+            rA[4] = lA[offA + 32];\
+            rA[5] = lA[offA + 40];\
+            rB[0] = lB[offB + 0];\
+            rB[1] = lB[offB + 8];\
+            rB[2] = lB[offB + 16];\
+            rB[3] = lB[offB + 24]; \
+            rB[4] = lB[offB + 32]; \
+            rB[5] = lB[offB + 40]; \
+            offA += 49; \
+            offB += 49; \
+            rC[0][0]=mad(rA[0],rB[0],rC[0][0]);         \
+            rC[1][0]=mad(rA[1],rB[0],rC[1][0]);         \
+            rC[2][0]=mad(rA[2],rB[0],rC[2][0]);         \
+            rC[3][0]=mad(rA[3],rB[0],rC[3][0]);         \
+            rC[4][0]=mad(rA[4],rB[0],rC[4][0]);         \
+            rC[5][0]=mad(rA[5],rB[0],rC[5][0]);         \
+            rC[0][1]=mad(rA[0],rB[1],rC[0][1]);         \
+            rC[1][1]=mad(rA[1],rB[1],rC[1][1]);         \
+            rC[2][1]=mad(rA[2],rB[1],rC[2][1]);         \
+            rC[3][1]=mad(rA[3],rB[1],rC[3][1]);         \
+            rC[4][1]=mad(rA[4],rB[1],rC[4][1]);         \
+            rC[5][1]=mad(rA[5],rB[1],rC[5][1]);         \
+            rC[0][2]=mad(rA[0],rB[2],rC[0][2]);         \
+            rC[1][2]=mad(rA[1],rB[2],rC[1][2]);         \
+            rC[2][2]=mad(rA[2],rB[2],rC[2][2]);         \
+            rC[3][2]=mad(rA[3],rB[2],rC[3][2]);         \
+            rC[4][2]=mad(rA[4],rB[2],rC[4][2]);         \
+            rC[5][2]=mad(rA[5],rB[2],rC[5][2]);         \
+            rC[0][3]=mad(rA[0],rB[3],rC[0][3]);         \
+            rC[1][3]=mad(rA[1],rB[3],rC[1][3]);         \
+            rC[2][3]=mad(rA[2],rB[3],rC[2][3]);         \
+            rC[3][3]=mad(rA[3],rB[3],rC[3][3]);         \
+            rC[4][3]=mad(rA[4],rB[3],rC[4][3]);         \
+            rC[5][3]=mad(rA[5],rB[3],rC[5][3]);         \
+            rC[0][4]=mad(rA[0],rB[4],rC[0][4]);         \
+            rC[1][4]=mad(rA[1],rB[4],rC[1][4]);         \
+            rC[2][4]=mad(rA[2],rB[4],rC[2][4]);         \
+            rC[3][4]=mad(rA[3],rB[4],rC[3][4]);         \
+            rC[4][4]=mad(rA[4],rB[4],rC[4][4]);         \
+            rC[5][4]=mad(rA[5],rB[4],rC[5][4]);         \
+            rC[0][5]=mad(rA[0],rB[5],rC[0][5]);         \
+            rC[1][5]=mad(rA[1],rB[5],rC[1][5]);         \
+            rC[2][5]=mad(rA[2],rB[5],rC[2][5]);         \
+            rC[3][5]=mad(rA[3],rB[5],rC[3][5]);         \
+            rC[4][5]=mad(rA[4],rB[5],rC[4][5]);         \
+            rC[5][5]=mad(rA[5],rB[5],rC[5][5]);         \
+            barrier(CLK_LOCAL_MEM_FENCE);
+
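+// Main 48x48 tile kernel, alpha-only store (C = alpha*A*B).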
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_48_8_8x8_6x6__ALPHA_SPLIT_MAIN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+    int block_k = K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+
+
+  C+= gidx*48;
+  C+= idx;
+  C+= gidy*48*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0]  ;
+  C[8*ldc] = alpha*rC[0][1]  ;
+  C[16*ldc] = alpha*rC[0][2] ;
+  C[24*ldc] = alpha*rC[0][3] ;
+  C[32*ldc] = alpha*rC[0][4] ;
+  C[40*ldc] = alpha*rC[0][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0]  ;
+  C[8*ldc] = alpha*rC[1][1]  ;
+  C[16*ldc] = alpha*rC[1][2] ;
+  C[24*ldc] = alpha*rC[1][3] ;
+  C[32*ldc] = alpha*rC[1][4] ;
+  C[40*ldc] = alpha*rC[1][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0]  ;
+  C[8*ldc] = alpha*rC[2][1]  ;
+  C[16*ldc] = alpha*rC[2][2] ;
+  C[24*ldc] = alpha*rC[2][3] ;
+  C[32*ldc] = alpha*rC[2][4] ;
+  C[40*ldc] = alpha*rC[2][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0]  ;
+  C[8*ldc] = alpha*rC[3][1]  ;
+  C[16*ldc] = alpha*rC[3][2] ;
+  C[24*ldc] = alpha*rC[3][3] ;
+  C[32*ldc] = alpha*rC[3][4] ;
+  C[40*ldc] = alpha*rC[3][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[4][0]  ;
+  C[8*ldc] = alpha*rC[4][1]  ;
+  C[16*ldc] = alpha*rC[4][2] ;
+  C[24*ldc] = alpha*rC[4][3] ;
+  C[32*ldc] = alpha*rC[4][4] ;
+  C[40*ldc] = alpha*rC[4][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[5][0]  ;
+  C[8*ldc] = alpha*rC[5][1]  ;
+  C[16*ldc] = alpha*rC[5][2] ;
+  C[24*ldc] = alpha*rC[5][3] ;
+  C[32*ldc] = alpha*rC[5][4] ;
+  C[40*ldc] = alpha*rC[5][5] ;
+  
+}
+
+
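+// Row-remainder variant, alpha-only store; the guards on A loads and C stores
+// mirror the _ALPHABETA version above.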
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_48_8_8x8_6x6__ALPHA_SPLIT_ROW(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = M/48;//get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  
+  int CurrentOffSetA = gidx*48 + idxT;
+  
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+    int block_k = K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+
+		 plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+		
+		barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_x>=M )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  for (int i=0; i<6; i++)
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0] ;
+    C[8*ldc+i*8]  = alpha*rC[i][1] ;
+    C[16*ldc+i*8] = alpha*rC[i][2] ;
+    C[24*ldc+i*8] = alpha*rC[i][3] ;
+    C[32*ldc+i*8] = alpha*rC[i][4] ;
+    C[40*ldc+i*8] = alpha*rC[i][5] ;
+	
+    offset_x += 8;
+    if(offset_x>=M)
+        return;
+  }
+}
+
+
+
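+// Column-remainder variant, alpha-only store; B loads and column stores are
+// guarded against N.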
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_1_8_8x8_6x6__ALPHA_SPLIT_COLUMN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    A += offsetA;
+    B += offsetB;
+    C += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = get_group_id(0);
+  int gidy = N/48;//get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+   int CurrentOffSetB =   gidy*48 + idyT;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+    int block_k = K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+		plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[0+16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[0+32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[0+40*ldb];
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+        
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0]   ;
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] ;
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] ;
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3] ;
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4] ;
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5] ;
+
+
+
+  }while (++i < 6);
+  
+}
+
+
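+// Corner variant, alpha-only store; guarded against both M and N.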
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_1_8_8x8_6x6__ALPHA_SPLIT_SINGLE(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    A += offsetA;
+    B += offsetB;
+    C  += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = M/48;//get_group_id(0);
+  int gidy = N/48;//get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  
+  int CurrentOffSetA =   gidx*48 + idxT;
+  int CurrentOffSetB =   gidy*48 + idyT;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+    int block_k = K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+
+	    plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[0+16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[0+32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[0+40*ldb];
+
+
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        A += lda << 3;
+        B += 8;
+    } while (--block_k > 0);
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0]  ;
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] ;
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] ;
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3];
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4];
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5];
+
+	offset_x += 8;
+    if(offset_x>=M)
+        return;
+
+
+  }while (++i < 6);
+  
+}
+";
+
+
+
+
+
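+// NN kernels for the general case where K may not be a multiple of 8: the inner
+// loop runs min(8u, K - block_k) steps so the last partial 8-deep block is handled
+// in place; stores are C = alpha*A*B + beta*C.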
+static const char * dgemm_NN_1_SPLIT__ALPHABETA = "
+
+
+
+
+
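+// Main 48x48 tile kernel using the bounded K loop described above, so K need not
+// be a multiple of 8.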
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_48_1_8x8_6x6__ALPHABETA_SPLIT_MAIN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+		plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+        
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+        
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+
+
+        A += lda << 3;
+        B += 8;	
+		block_k+=8;
+    } while (block_k < K);
+
+
+
+  C+= gidx*48;
+  C+= idx;
+  C+= gidy*48*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[0][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[0][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[0][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[0][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[0][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[1][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[1][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[1][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[1][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[1][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[2][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[2][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[2][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[2][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[2][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[3][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[3][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[3][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[3][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[3][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[4][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[4][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[4][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[4][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[4][5] + beta*C[40*ldc];
+  C+=8;
+  C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+  C[8*ldc] = alpha*rC[5][1] + beta*C[8*ldc];
+  C[16*ldc] = alpha*rC[5][2] + beta*C[16*ldc];
+  C[24*ldc] = alpha*rC[5][3] + beta*C[24*ldc];
+  C[32*ldc] = alpha*rC[5][4] + beta*C[32*ldc];
+  C[40*ldc] = alpha*rC[5][5] + beta*C[40*ldc];
+  
+ }
+
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_48_1_8x8_6x6__ALPHABETA_SPLIT_ROW(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
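+  // each local tile is 392 = 49*8 doubles: 8 rows of 48 values plus one element of padding
+  // per row, likely to stagger local-memory bank accesses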
+  __local double lA[392];
+  __local double lB[392];
+
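+  // SPLIT_ROW variant: gidx is pinned to the last 48-row block of C (M/48) instead of
+  // get_group_id(0); rows at or beyond M are zero-padded when staging A, and the stores
+  // below are bounds-checked against M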
+  int gidx = M/48;//get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  int CurrentOffSetA =   gidx*48 + idxT;
+   
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+
+		plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
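+        // each k-iteration reads six values of A and six values of B from the local tiles and
+        // accumulates their 6x6 outer product into rC via mad; offA/offB advance by 49, the
+        // padded row stride of the tiles, to move to the next k slice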
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+        A += lda << 3;
+        B += 8;
+        block_k+=8;
+    } while (block_k < K);
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_x>=M  )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
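+  // store the 6x6 result: pass i writes rC[i][0..5] at row offset i*8 to six columns spaced
+  // 8*ldc apart, then advances 8 rows; the loop exits early once the row index reaches M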
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0] + beta*C[0*ldc+i*8];
+	
+      C[8*ldc+i*8]  = alpha*rC[i][1] + beta*C[8*ldc+i*8];
+    
+	  C[16*ldc+i*8] = alpha*rC[i][2] + beta*C[16*ldc+i*8];
+    
+	  C[24*ldc+i*8] = alpha*rC[i][3] + beta*C[24*ldc+i*8];
+    
+      C[32*ldc+i*8] = alpha*rC[i][4] + beta*C[32*ldc+i*8];
+    
+	  C[40*ldc+i*8] = alpha*rC[i][5] + beta*C[40*ldc+i*8];
+
+	offset_x += 8;
+    if(offset_x>=M)
+        return;
+
+  }while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_1_1_8x8_6x6__ALPHABETA_SPLIT_COLUMN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
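+  // SPLIT_COLUMN variant: gidy is pinned to the last 48-column block of C (N/48) instead of
+  // get_group_id(1); columns at or beyond N are zero-padded when staging B, and the stores
+  // below are guarded against N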
+  int gidx = get_group_id(0);
+  int gidy = N/48;//get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+  int CurrentOffSetB =   gidy*48 + idyT;
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+		plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[0+16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[0+32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[0+40*ldb];
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+        
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+        A += lda << 3;
+        B += 8;
+        block_k += 8;
+    } while (block_k < K);
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if( offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0] + beta*C[0*ldc+i*8];
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] + beta*C[8*ldc+i*8];
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] + beta*C[16*ldc+i*8];
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3] + beta*C[24*ldc+i*8];
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4] + beta*C[32*ldc+i*8];
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5] + beta*C[40*ldc+i*8];
+
+
+
+  }while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_1_1_8x8_6x6__ALPHABETA_SPLIT_SINGLE(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       double const beta,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
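+  // SPLIT_SINGLE variant: both gidx and gidy are pinned to the last block in their dimension,
+  // covering the bottom-right corner tile; A and B are zero-padded past M and N, and the
+  // stores are guarded in both dimensions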
+  int gidx = M/48;//get_group_id(0);
+  int gidy = N/48; //get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  int CurrentOffSetA =   gidx*48 + idxT;
+  int CurrentOffSetB =   gidy*48 + idyT;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[0+16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[0+32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[0+40*ldb];
+
+
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+        A += lda << 3;
+        B += 8;
+        block_k+=8;
+    } while (block_k < K);
+
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0] + beta*C[0*ldc+i*8];
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] + beta*C[8*ldc+i*8];
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] + beta*C[16*ldc+i*8];
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3] + beta*C[24*ldc+i*8];
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4] + beta*C[32*ldc+i*8];
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5] + beta*C[40*ldc+i*8];
+
+	offset_x += 8;
+    if(offset_x>=M)
+        return;
+
+
+  }while (++i < 6);
+}
+";
+
+static const char * dgemm_NN_1_SPLIT__ALPHA = "
+
+
+
+
+
+
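+// The __ALPHA_SPLIT kernels below mirror the __ALPHABETA_SPLIT variants above, but take no
+// beta argument: C is overwritten with alpha*(A*B) instead of being accumulated into.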
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_48_1_8x8_6x6__ALPHA_SPLIT_MAIN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+        A += lda << 3;
+        B += 8;
+        block_k+=8;
+    } while (block_k < K);
+
+
+  C+= gidx*48;
+  C+= idx;
+  C+= gidy*48*ldc;
+  C+= idy*ldc;
+ 
+  C[0*ldc] = alpha*rC[0][0]  ;
+  C[8*ldc] = alpha*rC[0][1]  ;
+  C[16*ldc] = alpha*rC[0][2] ;
+  C[24*ldc] = alpha*rC[0][3] ;
+  C[32*ldc] = alpha*rC[0][4] ;
+  C[40*ldc] = alpha*rC[0][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[1][0]  ;
+  C[8*ldc] = alpha*rC[1][1]  ;
+  C[16*ldc] = alpha*rC[1][2] ;
+  C[24*ldc] = alpha*rC[1][3] ;
+  C[32*ldc] = alpha*rC[1][4] ;
+  C[40*ldc] = alpha*rC[1][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[2][0]  ;
+  C[8*ldc] = alpha*rC[2][1]  ;
+  C[16*ldc] = alpha*rC[2][2] ;
+  C[24*ldc] = alpha*rC[2][3] ;
+  C[32*ldc] = alpha*rC[2][4] ;
+  C[40*ldc] = alpha*rC[2][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[3][0]  ;
+  C[8*ldc] = alpha*rC[3][1]  ;
+  C[16*ldc] = alpha*rC[3][2] ;
+  C[24*ldc] = alpha*rC[3][3] ;
+  C[32*ldc] = alpha*rC[3][4] ;
+  C[40*ldc] = alpha*rC[3][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[4][0]  ;
+  C[8*ldc] = alpha*rC[4][1]  ;
+  C[16*ldc] = alpha*rC[4][2] ;
+  C[24*ldc] = alpha*rC[4][3] ;
+  C[32*ldc] = alpha*rC[4][4] ;
+  C[40*ldc] = alpha*rC[4][5] ;
+  C+=8;
+  C[0*ldc] = alpha*rC[5][0]  ;
+  C[8*ldc] = alpha*rC[5][1]  ;
+  C[16*ldc] = alpha*rC[5][2] ;
+  C[24*ldc] = alpha*rC[5][3] ;
+  C[32*ldc] = alpha*rC[5][4] ;
+  C[40*ldc] = alpha*rC[5][5] ;
+}
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_48_1_8x8_6x6__ALPHA_SPLIT_ROW(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+
+  int gidx = M/48;//get_group_id(0);
+  int gidy = get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  int CurrentOffSetA =   gidx*48 + idxT;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        
+        plB[0] = B[0+0*ldb];
+        plB[8] = B[0+8*ldb];
+        plB[16] = B[0+16*ldb];
+        plB[24] = B[0+24*ldb];
+        plB[32] = B[0+32*ldb];
+        plB[40] = B[0+40*ldb];
+
+		plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+        A += lda << 3;
+        B += 8;
+        block_k += 8;
+    } while (block_k < K);
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_x>=M )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0]  ;
+	
+      C[8*ldc+i*8]  = alpha*rC[i][1] ;
+    
+	  C[16*ldc+i*8] = alpha*rC[i][2] ;
+    
+	  C[24*ldc+i*8] = alpha*rC[i][3];
+    
+      C[32*ldc+i*8] = alpha*rC[i][4];
+    
+	  C[40*ldc+i*8] = alpha*rC[i][5];
+
+	offset_x += 8;
+    if(offset_x>=M)
+        return;
+
+
+  }while (++i < 6);
+}
+
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_48_1_1_8x8_6x6__ALPHA_SPLIT_COLUMN(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+  __local double lA[392];
+  __local double lB[392];
+
+
+  int gidx = get_group_id(0);
+  int gidy = N/48;//get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+   int CurrentOffSetB =   gidy*48 + idyT;
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+		plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[0+16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[0+32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[0+40*ldb];
+
+        plA[0] = A[0+0*lda];
+        plA[8] = A[8+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[24] = A[24+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[40] = A[40+0*lda];
+        
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+        A += lda << 3;
+        B += 8;
+        block_k += 8;
+    } while (block_k < K);
+
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0]  ;
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] ;
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] ;
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3];
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4];
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5];
+
+
+
+  }while (++i < 6);
+}
+
+
+__attribute__((reqd_work_group_size(8,8,1)))
+__kernel void dgemm_NN_1_1_1_8x8_6x6__ALPHA_SPLIT_SINGLE(__global double const * restrict A,
+                                       __global double const * restrict B,
+                                       __global double * C,
+                                       uint const M,
+                                       uint const N,
+                                       uint const K,
+                                       double const alpha,
+                                       uint lda,
+                                       uint ldb,
+                                       uint ldc,
+                                       uint offsetA,
+                                       uint offsetB,
+                                       uint offsetC)
+{
+    A += offsetA;
+    B += offsetB;
+    C    += offsetC;
+
+
+    double rC[6][6] = {(double)0};
+    double rA[6];
+    double rB[6];
+
+  __local double lA[392];
+  __local double lB[392];
+  
+  int gidx = M/48;//get_group_id(0);
+  int gidy = N/48;//get_group_id(1);
+  int idx = get_local_id(0);
+  int idy = get_local_id(1);
+
+  int idt = 8*idy + idx;
+  int idxT = idt % 8;
+  int idyT = idt / 8;
+
+  int CurrentOffSetA =   gidx*48 + idxT;
+  int CurrentOffSetB =   gidy*48 + idyT;
+
+
+  A +=  gidx*48+ idxT + idyT*lda;
+  B +=  gidy*48*ldb+ idx + idy*ldb;
+
+
+
+
+    int block_k = 0;//K >> 3;
+    do {
+
+	    __local double* plA = lA + idyT*49 + idxT;
+        __local double* plB = lB + idxT*49 + idyT;
+
+        
+	    plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[8]  = CurrentOffSetB+8>=N?0.0:B[0+8*ldb];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[0+16*ldb];
+        plB[24] = CurrentOffSetB+24>=N?0.0:B[0+24*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[0+32*ldb];
+        plB[40] = CurrentOffSetB+40>=N?0.0:B[0+40*ldb];
+
+
+        plA[0] = CurrentOffSetA>=M?0.0:A[0];
+        plA[8] = CurrentOffSetA+8>=M?0.0:A[8];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[24] = CurrentOffSetA+24>=M?0.0:A[24];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[40] = CurrentOffSetA+40>=M?0.0:A[40];
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+        int offA = idx ;
+        int offB = idy ;
+	    #pragma unroll 1
+        for(uint k = 0 ; k <min(8u, K-block_k ); k+=1)
+        {
+		  rA[0] = lA[offA + 0];               
+		  rA[1] = lA[offA + 8];               
+		  rA[2] = lA[offA + 16];              
+		  rA[3] = lA[offA + 24];              
+		  rA[4] = lA[offA + 32];              
+		  rA[5] = lA[offA + 40];              
+		  rB[0] = lB[offB + 0];               
+		  rB[1] = lB[offB + 8];               
+		  rB[2] = lB[offB + 16];              
+		  rB[3] = lB[offB + 24];              
+		  rB[4] = lB[offB + 32];              
+		  rB[5] = lB[offB + 40];              
+		  offA += 49;                         
+		  offB += 49;                         
+		  rC[0][0]=mad(rA[0],rB[0],rC[0][0]); 
+		  rC[1][0]=mad(rA[1],rB[0],rC[1][0]); 
+		  rC[2][0]=mad(rA[2],rB[0],rC[2][0]); 
+		  rC[3][0]=mad(rA[3],rB[0],rC[3][0]); 
+		  rC[4][0]=mad(rA[4],rB[0],rC[4][0]); 
+		  rC[5][0]=mad(rA[5],rB[0],rC[5][0]); 
+		  rC[0][1]=mad(rA[0],rB[1],rC[0][1]); 
+		  rC[1][1]=mad(rA[1],rB[1],rC[1][1]); 
+		  rC[2][1]=mad(rA[2],rB[1],rC[2][1]); 
+		  rC[3][1]=mad(rA[3],rB[1],rC[3][1]); 
+		  rC[4][1]=mad(rA[4],rB[1],rC[4][1]); 
+		  rC[5][1]=mad(rA[5],rB[1],rC[5][1]); 
+		  rC[0][2]=mad(rA[0],rB[2],rC[0][2]); 
+		  rC[1][2]=mad(rA[1],rB[2],rC[1][2]); 
+		  rC[2][2]=mad(rA[2],rB[2],rC[2][2]); 
+		  rC[3][2]=mad(rA[3],rB[2],rC[3][2]); 
+		  rC[4][2]=mad(rA[4],rB[2],rC[4][2]); 
+		  rC[5][2]=mad(rA[5],rB[2],rC[5][2]); 
+		  rC[0][3]=mad(rA[0],rB[3],rC[0][3]); 
+		  rC[1][3]=mad(rA[1],rB[3],rC[1][3]); 
+		  rC[2][3]=mad(rA[2],rB[3],rC[2][3]); 
+		  rC[3][3]=mad(rA[3],rB[3],rC[3][3]); 
+		  rC[4][3]=mad(rA[4],rB[3],rC[4][3]); 
+		  rC[5][3]=mad(rA[5],rB[3],rC[5][3]); 
+		  rC[0][4]=mad(rA[0],rB[4],rC[0][4]); 
+		  rC[1][4]=mad(rA[1],rB[4],rC[1][4]); 
+		  rC[2][4]=mad(rA[2],rB[4],rC[2][4]); 
+		  rC[3][4]=mad(rA[3],rB[4],rC[3][4]); 
+		  rC[4][4]=mad(rA[4],rB[4],rC[4][4]); 
+		  rC[5][4]=mad(rA[5],rB[4],rC[5][4]); 
+		  rC[0][5]=mad(rA[0],rB[5],rC[0][5]); 
+		  rC[1][5]=mad(rA[1],rB[5],rC[1][5]); 
+		  rC[2][5]=mad(rA[2],rB[5],rC[2][5]); 
+		  rC[3][5]=mad(rA[3],rB[5],rC[3][5]); 
+		  rC[4][5]=mad(rA[4],rB[5],rC[4][5]); 
+		  rC[5][5]=mad(rA[5],rB[5],rC[5][5]); 
+		  barrier(CLK_LOCAL_MEM_FENCE);
+		}
+        A += lda << 3;
+        B += 8;
+        block_k += 8;
+    } while (block_k < K);
+
+
+    int offset_x = gidx*48+ idx;
+    int offset_y = gidy*48+ idy;
+	
+
+  if(offset_x>=M ||offset_y>=N )
+    return;
+
+  C+= offset_x+ldc*offset_y; 
+ 
+ 
+  //for (int i=0; i<6; i++)
+  int i = 0;
+  do
+  {
+    C[0*ldc+i*8]  = alpha*rC[i][0]  ;
+	
+	if (offset_y+8<N)
+      C[8*ldc+i*8]  = alpha*rC[i][1] ;
+    
+	if (offset_y+16<N)
+	  C[16*ldc+i*8] = alpha*rC[i][2] ;
+    
+	if (offset_y+24<N)
+	  C[24*ldc+i*8] = alpha*rC[i][3];
+    
+	if (offset_y+32<N)
+      C[32*ldc+i*8] = alpha*rC[i][4];
+    
+	if (offset_y+40<N)
+	  C[40*ldc+i*8] = alpha*rC[i][5];
+
+	offset_x += 8;
+    if(offset_x>=M)
+        return;
+
+
+  }while (++i < 6);
+}
+";
\ No newline at end of file
diff --git a/src/library/blas/gens/clTemplates/dtrsm_gpu.cl b/src/library/blas/gens/clTemplates/dtrsm_gpu.cl
new file mode 100644
index 0000000..ab2218d
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/dtrsm_gpu.cl
@@ -0,0 +1,2004 @@
+
+static const char * dtrsm_gpu_kernels = "
+
+//#define __mul(i,j) ((i)*(j))
+//#define qmod(a, b) ((a)-(__mul((b), (a)/(b))))
+
+#define __mul(i,j) ((i)*(j))
+#define qmod(a, b) ((a)%(b))
+
+
+#define BLOCK_SIZE 16 // inner blocking size, <=32
+#define NB 128        // outer blocking size, >BLOCK_SIZE
+
+#define ZERO              ( 0.0)
+#define ONE               ( 1.0)
+
+#ifdef DOUBLE_PRECISION
+#ifdef cl_khr_fp64
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+#else
+#pragma OPENCL EXTENSION cl_amd_fp64 : enable
+#endif
+#endif
+
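+// Blocked triangular inversion: the DIAG_DTRTRI kernels invert the BLOCK_SIZE x BLOCK_SIZE
+// diagonal blocks of A into d_dinvA, and the TRIPLE_DGEMM_UPDATE kernels then combine pairs
+// of inverted blocks into progressively larger (up to NB x NB) inverses used by the TRSM solve.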
+__kernel void DIAG_DTRTRI_KERNEL_UPPER(int  isDiagUnit, 
+				       __global double const * restrict A,
+                                       uint offA,
+				       __global double *d_dinvA,
+				       uint lda,
+                                       uint na)
+{
+
+  int i, j;
+  double Ystx = 0;
+  __local double *y = 0;
+  double switcher;
+  double neg_switcher;
+
+  // Thread index
+  int tx = get_local_id(0);
+
+  // Global thread index
+  int gx = get_global_id(0);
+
+  // Block index
+  int bx = get_group_id(0);
+
+  A = A + offA;
+
+  __global const double *Aoff = A + bx*lda*BLOCK_SIZE + bx*BLOCK_SIZE;
+  int NumBLperNB = NB/BLOCK_SIZE;
+  d_dinvA += bx/NumBLperNB*NB*NB + (bx % NumBLperNB)*(NB*BLOCK_SIZE + BLOCK_SIZE);
+
+  __local double Bs[BLOCK_SIZE*BLOCK_SIZE];
+  __local double workspace[BLOCK_SIZE];    // workspace used to store the current working column
+
+  // load A
+  #pragma unroll
+  for( i=0; i < BLOCK_SIZE; i++ )
+    {
+      if(tx <= i && i+bx*BLOCK_SIZE < na )
+        {
+	  Bs[i*BLOCK_SIZE+tx] = *(Aoff+i*lda+tx);    
+        }
+      else
+        {
+	  Bs[i*BLOCK_SIZE+tx] = ZERO; 
+        }
+    } 
+  // read in the whole square block of A and zero out everything below the diagonal (the non-data part)
+ 
+  // Synchronize to make sure the matrices are loaded
+  //__syncthreads(); 
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+  // solve the diagonals
+
+  if(isDiagUnit == 1)
+    {
+      Bs[tx*BLOCK_SIZE+tx] = ONE;
+    }
+  else
+    {
+      if( Bs[tx*BLOCK_SIZE+tx] == ZERO )
+	{
+	  Bs[tx*BLOCK_SIZE+tx] = ONE;  
+	}
+      else
+	{
+	  Bs[tx*BLOCK_SIZE+tx] = ONE / ( Bs[tx*BLOCK_SIZE+tx]) ;
+	}      
+    }
+
+
+  /* the upper case */
+  for( i=0; i < BLOCK_SIZE; i++ ) {
+    Ystx =  ZERO;
+    if( tx < i)
+      {
+	switcher = ONE;
+      }
+    else
+      {
+	switcher = ZERO;
+      }
+
+    //dtrmv
+    workspace[tx] = *(Bs+i*BLOCK_SIZE+tx);
+    y = Bs+i*BLOCK_SIZE;
+
+    #pragma unroll
+    //for( j=tx; j < i; j++ )
+    for( j=0; j < i; j++ )
+      Ystx += switcher * (*(Bs+j*BLOCK_SIZE+tx)*workspace[j]);
+
+    //dscal
+    // if (tx != i) y[tx]=switcher*Ystx*(-Bs[i*BLOCK_SIZE+i]);
+
+    if( tx != i)
+      {
+	switcher = ONE;
+	neg_switcher =  ZERO;
+      }
+    else
+      {
+	switcher = ZERO;
+	neg_switcher =  ONE;
+      }
+
+    y[tx] = switcher *Ystx*(-Bs[i*BLOCK_SIZE+i])+neg_switcher*y[tx];
+
+    // __syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+  }
+
+  // write back A
+#pragma unroll
+  for( i=0; i < BLOCK_SIZE; i++ )
+    *(d_dinvA+i*NB+tx) = Bs[i*BLOCK_SIZE+tx];
+
+}
+
+
+//--------------------------------------------------------------------------//
+//--------------------------------------------------------------------------//
+//--------------------------------------------------------------------------//
+//--------------------------------------------------------------------------//
+
+
+__kernel void DIAG_DTRTRI_KERNEL_LOWER(int isDiagUnit, 
+				       __global double const * restrict A,
+                                       uint offA,
+				       __global double *d_dinvA,
+				       uint lda,
+                                       uint na)
+{
+  
+  int i, j;
+  double Ystx=  0;
+  __local double *Bw=0, *x=0, *y=0;
+  double switcher;
+  double neg_switcher;
+
+
+ // Thread index
+  int tx = get_local_id(0);
+  int txw;
+
+  int gx = get_global_id(0);
+
+  // Block index
+  int bx = get_group_id(0);
+
+  A = A + offA;
+
+  __global const double *Aoff = A+bx*lda*BLOCK_SIZE+bx*BLOCK_SIZE;
+  int NumBLperNB = NB/BLOCK_SIZE;
+  d_dinvA += bx/NumBLperNB*NB*NB + (bx % NumBLperNB)*(NB*BLOCK_SIZE + BLOCK_SIZE);
+
+  __local double Bs[BLOCK_SIZE*BLOCK_SIZE];
+  __local double workspace[BLOCK_SIZE];    // workspace used to store the current working column
+
+  // load A
+#pragma unroll
+  for( i=0; i < BLOCK_SIZE; i++ )
+    {
+      if(tx >= i && gx < na )
+        {
+	  Bs[i*BLOCK_SIZE+tx] = *(Aoff+i*lda+tx);
+        }
+      else
+        {
+	  Bs[i*BLOCK_SIZE+tx] = ZERO;
+        }
+    }
+
+  // read in the whole square block of A and zero out everything above the diagonal (the non-data part)
+
+  // Synchronize to make sure the matrices are loaded
+  //__syncthreads();
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+
+  // solve the diagonals
+
+  if(isDiagUnit == 1)
+    {
+      Bs[tx*BLOCK_SIZE+tx] = ONE;
+    }
+  else
+    {
+      if( Bs[tx*BLOCK_SIZE+tx] == ZERO )
+	{
+	  Bs[tx*BLOCK_SIZE+tx] = ONE;  
+	}
+      else
+	{
+	  Bs[tx*BLOCK_SIZE+tx] = ONE / ( Bs[tx*BLOCK_SIZE+tx]) ;
+	}      
+    }
+
+  /*
+   * the lower case
+   */
+
+
+  if( !(tx < BLOCK_SIZE-1) )
+    {
+      switcher = ONE;
+    }
+  else
+    {
+      switcher = ZERO;
+    }
+
+  Bs[(BLOCK_SIZE-1)*BLOCK_SIZE+tx] = switcher * Bs[(BLOCK_SIZE-1)*BLOCK_SIZE+tx];    
+  // zero out the last column, except the diagonal element
+
+  for( i=BLOCK_SIZE-2; i >= 0; i-- ) {
+    Ystx =  ZERO;
+       
+    if( tx > i)
+      {
+	switcher = ONE;
+      }
+    else
+      {
+	switcher = ZERO;
+      }
+
+    //dtrmv
+    Bw = Bs+(i+1)*BLOCK_SIZE+i+1;
+    workspace[tx] = *(Bs+i*BLOCK_SIZE+tx);
+    x = workspace+i+1;
+    y = Bs+i*BLOCK_SIZE;
+
+    txw = (tx-i-1);
+
+#pragma unroll
+    for( j=0; j < BLOCK_SIZE-i-1; j++ )
+      Ystx += switcher*(*(Bw+j*BLOCK_SIZE+txw)*x[j]);
+
+    //dscal
+
+    if( tx != i)
+      {
+	switcher = ONE;
+	neg_switcher =  ZERO;
+      }
+    else
+      {
+	switcher = ZERO;
+	neg_switcher =  ONE;
+      }
+
+    y[tx] = switcher * Ystx*(-Bs[i*BLOCK_SIZE+i])+ neg_switcher *y[tx];
+
+    //__syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+  }
+
+  // write back A
+#pragma unroll
+  for( i=0; i < BLOCK_SIZE; i++ )
+    *(d_dinvA+i*NB+tx) = Bs[i*BLOCK_SIZE+tx];
+}
+
+
+/*
+ * daxpy computes c += alpha*b, where b and c are 16-element vectors.
+ */
+static void daxpy(
+    double alpha,
+    __local const double * __restrict__ b,
+    double * __restrict__ c )
+{
+    c[0]  += alpha * b[0];
+    c[1]  += alpha * b[1];
+    c[2]  += alpha * b[2];
+    c[3]  += alpha * b[3];
+    c[4]  += alpha * b[4];
+    c[5]  += alpha * b[5];
+    c[6]  += alpha * b[6];
+    c[7]  += alpha * b[7];
+    c[8]  += alpha * b[8];
+    c[9]  += alpha * b[9];
+    c[10] += alpha * b[10];
+    c[11] += alpha * b[11];
+    c[12] += alpha * b[12];
+    c[13] += alpha * b[13];
+    c[14] += alpha * b[14];
+    c[15] += alpha * b[15];
+}
+
+
+/*
+ * B12 = -inv(A11)*A12*inv(A22)
+ */
+__kernel void 
+TRIPLE_DGEMM_UPDATE_16_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, uint lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    //const int page = (blockIdx.y)%(npages);
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0) * (get_local_size(0)*get_local_size(1));
+    const int iby = bIdy * 16;
+    const int id = inx + iny*get_local_size(0);
+    __local double bs[16][17];
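+    // 16x16 tile of B staged with a row stride of 17, likely to avoid local-memory bank
+    // conflicts when columns of bs are read back in daxpy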
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part one---------------------------//
+    {
+        // A12*inv(A22) -> A12
+        // A=A12, B=inv(A22), C=A12(d_dinvA)
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        int xa = page*blk*2 + ibx + id ; 
+        int ya = page*blk*2 + blk ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;  // macro READA will detect overflow on y dimension 
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
+              
+        B = d_dinvA + blk*NB + blk;
+        C = d_dinvA + blk*NB;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4]  ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx   ][iny   ] = B[    0*ldb];
+            bs[inx   ][iny+ 4] = B[    4*ldb];
+            bs[inx   ][iny+ 8] = B[    8*ldb];
+            bs[inx   ][iny+12] = B[   12*ldb];
+            bs[inx+ 4][iny   ] = B[ 4+ 0*ldb];
+            bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb];
+            bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb];
+            bs[inx+ 4][iny+12] = B[ 4+12*ldb];
+            bs[inx+ 8][iny   ] = B[ 8+ 0*ldb];
+            bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb];
+            bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb];
+            bs[inx+ 8][iny+12] = B[ 8+12*ldb];
+            bs[inx+12][iny   ] = B[12+ 0*ldb];
+            bs[inx+12][iny+ 4] = B[12+ 4*ldb];
+            bs[inx+12][iny+ 8] = B[12+ 8*ldb];
+            bs[inx+12][iny+12] = B[12+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+    //__syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+#undef READA
+
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A11)*A12 -> A12
+        // A=inv(A11), B=A12, C=A12
+        __global double *A, *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        A = d_dinvA;
+        B = C = d_dinvA + blk*NB;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx   ][iny   ] = B[    0*ldb];
+            bs[inx   ][iny+ 4] = B[    4*ldb];
+            bs[inx   ][iny+ 8] = B[    8*ldb];
+            bs[inx   ][iny+12] = B[   12*ldb];
+            bs[inx+ 4][iny   ] = B[ 4+ 0*ldb];
+            bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb];
+            bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb];
+            bs[inx+ 4][iny+12] = B[ 4+12*ldb];
+            bs[inx+ 8][iny   ] = B[ 8+ 0*ldb];
+            bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb];
+            bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb];
+            bs[inx+ 8][iny+12] = B[ 8+12*ldb];
+            bs[inx+12][iny   ] = B[12+ 0*ldb];
+            bs[inx+12][iny+ 4] = B[12+ 4*ldb];
+            bs[inx+12][iny+ 8] = B[12+ 8*ldb];
+            bs[inx+12][iny+12] = B[12+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+}
+
+
+
+__kernel void
+TRIPLE_DGEMM_UPDATE_16_PART1_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    //const int page = (get_group_id(1))%(npages);
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0) * (get_local_size(0)*get_local_size(1));
+    const int iby = bIdy * 16;
+    const int id = inx + iny*get_local_size(0);
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    //--------------------------part one---------------------------//
+    {
+        // A21*inv(A11) -> A21
+        // A=A21, B=inv(A11), C=A21
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        int PagesPerNB = NB/(blk*2);
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        int xa = page*blk*2 + blk + ibx + id ; 
+        int ya = page*blk*2  ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;  // macro READA will detect overflow on y dimension 
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
+              
+        B = d_dinvA;
+        C = d_dinvA + blk;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4]  ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx   ][iny   ] = B[    0*ldb];
+            bs[inx   ][iny+ 4] = B[    4*ldb];
+            bs[inx   ][iny+ 8] = B[    8*ldb];
+            bs[inx   ][iny+12] = B[   12*ldb];
+            bs[inx+ 4][iny   ] = B[ 4+ 0*ldb];
+            bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb];
+            bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb];
+            bs[inx+ 4][iny+12] = B[ 4+12*ldb];
+            bs[inx+ 8][iny   ] = B[ 8+ 0*ldb];
+            bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb];
+            bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb];
+            bs[inx+ 8][iny+12] = B[ 8+12*ldb];
+            bs[inx+12][iny   ] = B[12+ 0*ldb];
+            bs[inx+12][iny+ 4] = B[12+ 4*ldb];
+            bs[inx+12][iny+ 8] = B[12+ 8*ldb];
+            bs[inx+12][iny+12] = B[12+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+
+#undef READA
+
+    //__syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+}
+
+
+
+/*
+ * B21 = -inv(A22)*A21*inv(A11)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_16_PART2_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0) * (get_local_size(0)*get_local_size(1));
+    const int iby = bIdy * 16;
+    const int id = inx + iny*get_local_size(0);
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A22)*A21 -> A21
+        // A=inv(A22), B=A21, C=A21
+        __global double *A, *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        int PagesPerNB = NB/(blk*2);
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        A = d_dinvA + blk*NB + blk;
+        B = C = d_dinvA + blk;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx   ][iny   ] = B[    0*ldb];
+            bs[inx   ][iny+ 4] = B[    4*ldb];
+            bs[inx   ][iny+ 8] = B[    8*ldb];
+            bs[inx   ][iny+12] = B[   12*ldb];
+            bs[inx+ 4][iny   ] = B[ 4+ 0*ldb];
+            bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb];
+            bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb];
+            bs[inx+ 4][iny+12] = B[ 4+12*ldb];
+            bs[inx+ 8][iny   ] = B[ 8+ 0*ldb];
+            bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb];
+            bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb];
+            bs[inx+ 8][iny+12] = B[ 8+12*ldb];
+            bs[inx+12][iny   ] = B[12+ 0*ldb];
+            bs[inx+12][iny+ 4] = B[12+ 4*ldb];
+            bs[inx+12][iny+ 8] = B[12+ 8*ldb];
+            bs[inx+12][iny+12] = B[12+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+    //__syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+}
+
+/*
+ * B12 = -inv(A11)*A12*inv(A22)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_32_PART1_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0) * (get_local_size(0)*get_local_size(1));
+    const int iby = bIdy * 16;
+    const int id = inx + iny*get_local_size(0);
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part one---------------------------//
+    {
+        // A12*inv(A22) -> A12
+        // A=A12, B=inv(A22), C=A12(d_dinvA)
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        int xa = page*blk*2 + ibx + id ; 
+        int ya = page*blk*2 + blk ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;   // macro READA will detect overflow on y dimension  
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
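+        // READA reads A only while incA < maxA; elements past the edge of A are treated as 0 instead of being read out of bounds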
+
+        B = d_dinvA + blk*NB + blk;
+        C = d_dinvA + blk*NB;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4]  ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx  ][iny   ] = B[   0*ldb];
+            bs[inx  ][iny+ 4] = B[   4*ldb];
+            bs[inx  ][iny+ 8] = B[   8*ldb];
+            bs[inx  ][iny+12] = B[  12*ldb];
+            bs[inx+8][iny   ] = B[8+ 0*ldb];
+            bs[inx+8][iny+ 4] = B[8+ 4*ldb];
+            bs[inx+8][iny+ 8] = B[8+ 8*ldb];
+            bs[inx+8][iny+12] = B[8+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+
+    //__syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+}
+
+/*
+ * B12 = -inv(A11)*A12*inv(A22)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_32_PART2_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0) * (get_local_size(0)*get_local_size(1));
+    const int iby = bIdy * 16;
+    const int id = inx + iny*get_local_size(0);
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A11)*A12 -> A12
+        // A=inv(A11), B=A12, C=A12
+        __global double *A, *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        A = d_dinvA;
+        B = C = d_dinvA + blk*NB;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx  ][iny   ] = B[   0*ldb];
+            bs[inx  ][iny+ 4] = B[   4*ldb];
+            bs[inx  ][iny+ 8] = B[   8*ldb];
+            bs[inx  ][iny+12] = B[  12*ldb];
+            bs[inx+8][iny   ] = B[8+ 0*ldb];
+            bs[inx+8][iny+ 4] = B[8+ 4*ldb];
+            bs[inx+8][iny+ 8] = B[8+ 8*ldb];
+            bs[inx+8][iny+12] = B[8+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * B21 = -inv(A22)*A21*inv(A11)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_32_PART1_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0) * (get_local_size(0)*get_local_size(1));
+    const int iby = bIdy * 16;
+    const int id = inx + iny*get_local_size(0);
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part one---------------------------//
+    {
+        // A21*inv(A11) -> A21
+        // A=A21, B=inv(A11), C=A21
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        int xa = page*blk*2 + blk +  ibx + id ; 
+        int ya = page*blk*2 ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;  // macro READA will detect overflow on y dimension 
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
+              
+        B = d_dinvA;
+        C = d_dinvA + blk;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4]  ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx  ][iny   ] = B[   0*ldb];
+            bs[inx  ][iny+ 4] = B[   4*ldb];
+            bs[inx  ][iny+ 8] = B[   8*ldb];
+            bs[inx  ][iny+12] = B[  12*ldb];
+            bs[inx+8][iny   ] = B[8+ 0*ldb];
+            bs[inx+8][iny+ 4] = B[8+ 4*ldb];
+            bs[inx+8][iny+ 8] = B[8+ 8*ldb];
+            bs[inx+8][iny+12] = B[8+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+
+    //__syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+}
+
+/*
+ * B21 = -inv(A22)*A21*inv(A11)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_32_PART2_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0) * (get_local_size(0)*get_local_size(1));
+    const int iby = bIdy * 16;
+    const int id = inx + iny*get_local_size(0);
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A22)*A21 -> A21
+        // A=inv(A22), B=A21, C=A21
+        __global const double *A;
+        __global double *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        A = d_dinvA + blk*NB + blk;
+        B = C = d_dinvA + blk;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx  ][iny   ] = B[   0*ldb];
+            bs[inx  ][iny+ 4] = B[   4*ldb];
+            bs[inx  ][iny+ 8] = B[   8*ldb];
+            bs[inx  ][iny+12] = B[  12*ldb];
+            bs[inx+8][iny   ] = B[8+ 0*ldb];
+            bs[inx+8][iny+ 4] = B[8+ 4*ldb];
+            bs[inx+8][iny+ 8] = B[8+ 8*ldb];
+            bs[inx+8][iny+12] = B[8+12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * B12 = -inv(A11)*A12*inv(A22)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_64_PART1_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part one---------------------------//
+    {
+        // A12*inv(A22) -> A12(d_dinvA)
+        // A=A12, B=inv(A22), C=A12
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        int xa = page*blk*2 + ibx + id ; 
+        int ya = page*blk*2 + blk ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;   // macro READA will detect overflow on y dimension
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
+
+        B = d_dinvA + blk*NB + blk;
+        C = d_dinvA + blk*NB;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4]  ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+#undef READA
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * B12 = -inv(A11)*A12*inv(A22)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_64_PART2_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A11)*A12 -> A12
+        // A=inv(A11), B=A12, C=A12
+        __global const double *A;
+        __global double *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        A = d_dinvA;
+        B = C = d_dinvA + blk*NB;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * B21 = -inv(A22)*A21*inv(A11)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_64_PART1_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part one---------------------------//
+    {
+        // A21*inv(A11) -> A21
+        // A=A21, B=inv(A11), C=A21
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+        int xa = page*blk*2 + blk + ibx + id ; 
+        int ya = page*blk*2 ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;  // macro READA will detect overflow on y dimension 
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
+              
+        B = d_dinvA;
+        C = d_dinvA + blk;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4]  ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ; 
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ; 
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ; 
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ; 
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+#undef READA
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * B21 = -inv(A22)*A21*inv(A11)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_64_PART2_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A22)*A21 -> A21
+        // A=inv(A22), B=A21, C=A21
+        __global const double *A;
+        __global double *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        A = d_dinvA + blk*NB + blk;
+        B = C = d_dinvA + blk;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * B12 = -inv(A11)*A12*inv(A22)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_ABOVE64_PART1_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part one---------------------------//
+    {
+        // A12*inv(A22) -> A12(d_dinvA)
+        // A=A12, B=inv(A22), C=A12
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        int xa = page*blk*2 + ibx + id ; 
+        int ya = page*blk*2 + blk ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;   // macro READA will detect overflow on y dimension
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
+
+        B = d_dinvA + blk*NB + blk;
+        C = d_dinvA + blk*NB;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+}
+#undef READA
+
+/*
+ * B21 = -inv(A22)*A21*inv(A11)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_ABOVE64_PART1_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+    //--------------------------part one---------------------------//
+    {
+        // A21*inv(A11) -> A21
+        // A=A21, B=inv(A11), C=A21
+        __global const double *A;
+        __global double *B, *C;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        int xa = page*blk*2 + blk + ibx + id ; 
+        int ya = page*blk*2 ; 
+        int incA = ya * lda + xa ; 
+
+        // maxA will be used to detect overflow on all subsequent accesses on A(xa, ya:ya+???) 
+
+        int maxA ;
+        if ( xa < na ) 
+          maxA = lda*na ;  // macro READA will detect overflow on y dimension 
+        else  
+          maxA  = 0 ;  // there is already an overflow on xa 
+
+#define READA ( (incA < maxA ) ? Ain[incA] : 0 )  
+
+        B = d_dinvA;
+        C = d_dinvA + blk;
+
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4]  ;
+            a[0] = READA ; incA += lda ; 
+            a[1] = READA ; incA += lda ; 
+            a[2] = READA ; incA += lda ; 
+            a[3] = READA ; incA += lda ; 
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = READA ; incA += lda ;
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = READA ; incA += lda ;
+            daxpy( a[2], &bs[10][0], c );  a[2] = READA ; incA += lda ;
+            daxpy( a[3], &bs[11][0], c );  a[3] = READA ; incA += lda ;
+
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+#undef READA
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * B12 = -inv(A11)*A12*inv(A22)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_ABOVE64_PART2_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A11)*A12 -> A12
+        // A=inv(A11), B=A12, C=A12
+        __global const double *A;
+        __global double *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        A = d_dinvA;
+        B = d_dinvA + blk*NB;
+        C = d_dinvA + blk;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+}
+
+/*
+ * part 3, copy data into position
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_ABOVE64_PART3_R (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+
+    //--------------------------part three---------------------------//
+    {
+        // copy -inv(A11)*A12 from its staging block into the final A12 position and clear the staging block
+        __global double *C_temp, *C_real;
+        int ldc = NB;
+
+        C_temp = d_dinvA + NB*NB*(page/PagesPerNB)
+               + (qmod(page, PagesPerNB))*(blk*2)*NB
+               + (qmod(page, PagesPerNB))*(blk*2)
+               + blk;
+
+        C_real = d_dinvA + NB*NB*(page/PagesPerNB)
+               + (qmod(page, PagesPerNB))*(blk*2)*NB
+               + blk*NB
+               + (qmod(page, PagesPerNB))*(blk*2);
+
+        C_temp += ibx + id  + __mul( iby, ldc );
+        C_real += ibx + id  + __mul( iby, ldc );
+
+        for( int i = 0; i < 16; i++ ) {
+            C_real[0] = C_temp[0];
+            C_temp[0] = ZERO;
+            C_temp += ldc;
+            C_real += ldc;
+        }
+    }
+}
+
+/*
+ * part 3: copy data back to position
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_ABOVE64_PART3_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+
+    //--------------------------part three---------------------------//
+    {
+        // copy -inv(A22)*A21 from its staging block into the final A21 position and clear the staging block
+        __global double *C_temp, *C_real;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        C_real = d_dinvA + blk;
+
+        C_temp = d_dinvA + blk*NB;
+
+        C_temp += ibx + id  + __mul( iby, ldc );
+        C_real += ibx + id  + __mul( iby, ldc );
+
+        for( int i = 0; i < 16; i++ ) {
+            C_real[0] = C_temp[0];
+            C_temp[0] = ZERO;
+            C_real += ldc;
+            C_temp += ldc;
+        }
+    }
+    //__syncthreads();
+    barrier(CLK_LOCAL_MEM_FENCE);
+}
+
+/*
+ * B21 = -inv(A22)*A21*inv(A11)
+ */
+__kernel void
+TRIPLE_DGEMM_UPDATE_ABOVE64_PART2_L (__global const double *Ain, uint offAin, __global double *d_dinvA, int blk, int lda, int npages, int na)
+{
+    const int bIdy = get_group_id(1)/npages;
+    const int page = qmod(get_group_id(1), npages);
+    const int inx = get_local_id(0);
+    const int iny = get_local_id(1);
+    const int ibx = get_group_id(0)*64;
+    const int iby = bIdy*16;
+    const int id = inx + iny*16;
+    __local double bs[16][17];
+
+    Ain = Ain + offAin;
+
+    int PagesPerNB = NB/(blk*2);
+
+    //--------------------------part two---------------------------//
+    {
+        // -inv(A22)*A21 -> A21
+        // A=inv(A22), B=A21, C=A21
+        __global double *A, *B, *C;
+        int lda = NB;
+        int ldb = NB;
+        int ldc = NB;
+
+        d_dinvA += NB*NB*(page/PagesPerNB)
+                + (qmod(page, PagesPerNB))*(blk*2)*NB
+                + (qmod(page, PagesPerNB))*(blk*2);
+
+        A = d_dinvA + blk*NB + blk;
+        B = d_dinvA + blk;
+
+        C = d_dinvA + blk*NB;
+
+        A += ibx + id;
+        B += inx + __mul( iby + iny, ldb );
+        C += ibx + id  + __mul( iby, ldc );
+
+        __global const double *Blast = B + blk;
+
+        double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+        do {
+            double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
+
+            bs[inx][iny   ] = B[ 0*ldb];
+            bs[inx][iny+ 4] = B[ 4*ldb];
+            bs[inx][iny+ 8] = B[ 8*ldb];
+            bs[inx][iny+12] = B[12*ldb];
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 0][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 1][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 2][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 3][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 4][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 5][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[ 6][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[ 7][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[ 8][0], c );  a[0] = A[0*lda];
+            daxpy( a[1], &bs[ 9][0], c );  a[1] = A[1*lda];
+            daxpy( a[2], &bs[10][0], c );  a[2] = A[2*lda];
+            daxpy( a[3], &bs[11][0], c );  a[3] = A[3*lda];
+
+            A += 4*lda;
+            daxpy( a[0], &bs[12][0], c );
+            daxpy( a[1], &bs[13][0], c );
+            daxpy( a[2], &bs[14][0], c );
+            daxpy( a[3], &bs[15][0], c );
+
+            B += 16;
+            //__syncthreads();
+	    barrier(CLK_LOCAL_MEM_FENCE);
+        } while( B < Blast );
+
+        for( int i = 0; i < 16; i++ ) {
+            C[0] = (-1)*c[i];
+            C += ldc;
+        }
+    }
+}
+
+
+";
diff --git a/src/library/blas/gens/clTemplates/sgemm_gcn.cl b/src/library/blas/gens/clTemplates/sgemm_gcn.cl
new file mode 100644
index 0000000..d569b13
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/sgemm_gcn.cl
@@ -0,0 +1,2083 @@
+static const char * sgemm_NT_96_96_16_16x16_6x6__ALPHABETA = "
+
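+// M6x6: one k-step of the per-work-item 6x6 register tile; loads 6 values of A and 6 of B from local memory at stride 16, advances offA/offB by the padded row stride 97, and accumulates the 36 products with mad()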
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NT_96_96_16_16x16_6x6__ALPHABETA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+//    GPtr uA, uB;
+//    uA.floatv = (__global float *)A;
+//    uB.floatv = (__global float *)B;
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
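+    // 16 * 97 = 1552: each local buffer holds a 96x16 panel stored with a padded row stride of 97 to avoid bank conflicts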
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*96+ idxT + idyT*lda;
+    B +=  gidy*96+ idxT + idyT*ldb;
+    
+   
+    uint block_k = K >> 4;
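+    // one pass per 16 columns of K: stage a 96x16 panel of A and of B into local memory, then run 16 M6x6 steps; the kernel name suggests K is expected to be a multiple of 16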
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idyT*97+idxT;
+        __local float* plB = lB + idyT*97+idxT;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+//        #pragma unroll 1
+//        for(unsigned int k = 0 ; k < 16; k+=1){
+//        }
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
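+    // write back the 6x6 register tile: the six row entries handled by this work-item are 16 apart in M, the six column entries 16 apart in N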
+	C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+   
+}
+
+";
+
+
+
+static const char * sgemm_NT_96_96_16_16x16_6x6__ALPHA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NT_96_96_16_16x16_6x6__ALPHA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+//    GPtr uA, uB;
+//    uA.floatv = (__global float *)A;
+//    uB.floatv = (__global float *)B;
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*96+ idxT + idyT*lda;
+    B +=  gidy*96+ idxT + idyT*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idyT*97+idxT;
+        __local float* plB = lB + idyT*97+idxT;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+//        #pragma unroll 1
+//        for(unsigned int k = 0 ; k < 16; k+=1){
+//        }
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C[32*ldc] = alpha*rC[0][2];
+    C[48*ldc] = alpha*rC[0][3];
+    C[64*ldc] = alpha*rC[0][4];
+    C[80*ldc] = alpha*rC[0][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    C[32*ldc] = alpha*rC[1][2];
+    C[48*ldc] = alpha*rC[1][3];
+    C[64*ldc] = alpha*rC[1][4];
+    C[80*ldc] = alpha*rC[1][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[16*ldc] = alpha*rC[2][1];
+    C[32*ldc] = alpha*rC[2][2];
+    C[48*ldc] = alpha*rC[2][3];
+    C[64*ldc] = alpha*rC[2][4];
+    C[80*ldc] = alpha*rC[2][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[16*ldc] = alpha*rC[3][1];
+    C[32*ldc] = alpha*rC[3][2];
+    C[48*ldc] = alpha*rC[3][3];
+    C[64*ldc] = alpha*rC[3][4];
+    C[80*ldc] = alpha*rC[3][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[4][0] ;
+    C[16*ldc] = alpha*rC[4][1];
+    C[32*ldc] = alpha*rC[4][2];
+    C[48*ldc] = alpha*rC[4][3];
+    C[64*ldc] = alpha*rC[4][4];
+    C[80*ldc] = alpha*rC[4][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[5][0] ;
+    C[16*ldc] = alpha*rC[5][1];
+    C[32*ldc] = alpha*rC[5][2];
+    C[48*ldc] = alpha*rC[5][3];
+    C[64*ldc] = alpha*rC[5][4];
+    C[80*ldc] = alpha*rC[5][5];
+   
+}
+
+";
+
+
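+// Kernel naming convention, readable from the bodies below:
+// sgemm_<layout>_<tileM>_<tileN>_<blockK>_<wgX>x<wgY>_<microM>x<microN>__ALPHA[BETA].
+// Each 16x16 work-group computes one tileM x tileN block of C, and every
+// work-item accumulates a microM x microN register tile rC, reading its
+// operands from the __local staging buffers lA/lB whose rows are padded by
+// one element (stride tileM+1), presumably to avoid bank conflicts.
+// K is consumed in blocks of 16 with no remainder handling, so these kernels
+// assume K is a multiple of 16 and M/N are multiples of the tile size.
+// Work-item (idx, idy) of work-group (gidx, gidy) writes the C rows
+// gidx*tileM + idx + {0,16,...} and columns gidy*tileN + idy + {0,16,...}.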
+static const char * sgemm_NT_64_64_16_16x16_4x4__ALPHABETA = "
+
+#define  M4x4 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            offA += 65;								  \
+            offB += 65;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NT_64_64_16_16x16_4x4__ALPHABETA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[4][4]  = {(float)0};
+    float rA[1][4];
+    float rB[1][4];
+    
+//    GPtr uA, uB;
+//    uA.floatv = (__global float *)A;
+//    uB.floatv = (__global float *)B;
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1056];
+    __local float lB[1056];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*64+ idxT + idyT*lda;
+    B +=  gidy*64+ idxT + idyT*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idyT*65+idxT;
+        __local float* plB = lB + idyT*65+idxT;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+//        #pragma unroll 1
+//        for(unsigned int k = 0 ; k < 16; k+=1){
+//        }
+
+        M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*64+idx;
+    C+= gidy*64*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+
+   
+}
+
+";
+
+
+
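+// The __ALPHA variants mirror their __ALPHABETA counterparts but drop the
+// beta argument and the beta*C term in the final stores, so C is write-only;
+// presumably they are selected when beta == 0 to avoid reading C back.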
+static const char * sgemm_NT_64_64_16_16x16_4x4__ALPHA = "
+
+#define  M4x4 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            offA += 65;								  \
+            offB += 65;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NT_64_64_16_16x16_4x4__ALPHA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[4][4]  = {(float)0};
+    float rA[1][4];
+    float rB[1][4];
+    
+//    GPtr uA, uB;
+//    uA.floatv = (__global float *)A;
+//    uB.floatv = (__global float *)B;
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1056];
+    __local float lB[1056];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*64+ idxT + idyT*lda;
+    B +=  gidy*64+ idxT + idyT*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idyT*65+idxT;
+        __local float* plB = lB + idyT*65+idxT;
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*64+idx;
+    C+= gidy*64*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C[32*ldc] = alpha*rC[0][2];
+    C[48*ldc] = alpha*rC[0][3];
+
+    C+=16;					  
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    C[32*ldc] = alpha*rC[1][2];
+    C[48*ldc] = alpha*rC[1][3];
+
+    C+=16;					  
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[16*ldc] = alpha*rC[2][1];
+    C[32*ldc] = alpha*rC[2][2];
+    C[48*ldc] = alpha*rC[2][3];
+
+    C+=16;					  
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[16*ldc] = alpha*rC[3][1];
+    C[32*ldc] = alpha*rC[3][2];
+    C[48*ldc] = alpha*rC[3][3];
+
+   
+}
+
+";
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
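+// NN kernels: B is not transposed, so each work-item walks B along ldb
+// (B[16*ldb], B[32*ldb], ...) and stages it transposed into lB via
+// plB = lB + idxT*97 + idyT; B then advances by 16 per K block while A
+// advances by lda<<4, and the M6x6 inner macro is unchanged from the NT case.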
+static const char * sgemm_NN_96_96_16_16x16_6x6__ALPHABETA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NN_96_96_16_16x16_6x6__ALPHABETA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+//    GPtr uA, uB;
+//    uA.floatv = (__global float *)A;
+//    uB.floatv = (__global float *)B;
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*96+ idxT + idyT*lda;
+    B +=  (gidy*96+idyT)*ldb + idxT;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+
+        __local float* plA = lA + idyT*97+idxT;
+        __local float* plB = lB + idxT*97+idyT;
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+   
+}
+
+";
+
+
+
+static const char * sgemm_NN_96_96_16_16x16_6x6__ALPHA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NN_96_96_16_16x16_6x6__ALPHA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*96+ idxT + idyT*lda;
+    B +=  (gidy*96+idyT)*ldb + idxT;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+
+        __local float* plA = lA + idyT*97+idxT;
+        __local float* plB = lB + idxT*97+idyT;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+   
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C[32*ldc] = alpha*rC[0][2];
+    C[48*ldc] = alpha*rC[0][3];
+    C[64*ldc] = alpha*rC[0][4];
+    C[80*ldc] = alpha*rC[0][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    C[32*ldc] = alpha*rC[1][2];
+    C[48*ldc] = alpha*rC[1][3];
+    C[64*ldc] = alpha*rC[1][4];
+    C[80*ldc] = alpha*rC[1][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[16*ldc] = alpha*rC[2][1];
+    C[32*ldc] = alpha*rC[2][2];
+    C[48*ldc] = alpha*rC[2][3];
+    C[64*ldc] = alpha*rC[2][4];
+    C[80*ldc] = alpha*rC[2][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[16*ldc] = alpha*rC[3][1];
+    C[32*ldc] = alpha*rC[3][2];
+    C[48*ldc] = alpha*rC[3][3];
+    C[64*ldc] = alpha*rC[3][4];
+    C[80*ldc] = alpha*rC[3][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[4][0] ;
+    C[16*ldc] = alpha*rC[4][1];
+    C[32*ldc] = alpha*rC[4][2];
+    C[48*ldc] = alpha*rC[4][3];
+    C[64*ldc] = alpha*rC[4][4];
+    C[80*ldc] = alpha*rC[4][5];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[5][0] ;
+    C[16*ldc] = alpha*rC[5][1];
+    C[32*ldc] = alpha*rC[5][2];
+    C[48*ldc] = alpha*rC[5][3];
+    C[64*ldc] = alpha*rC[5][4];
+    C[80*ldc] = alpha*rC[5][5];
+   
+}
+
+";
+
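+// The 64x64 NN kernels index their global loads directly with idx/idy rather
+// than the recomputed idxT/idyT used above (for a 16x16 work-group the two
+// are identical), covering the 64-wide tile with four 16-spaced loads per
+// work-item.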
+static const char * sgemm_NN_64_64_16_16x16_4x4__ALPHABETA = "
+
+
+#define  M4x4 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            offA += 65;								  \
+            offB += 65;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_64_64_16_16x16_4x4__ALPHABETA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[4][4]  = {(float)0};
+    float rA[1][4];
+    float rB[1][4];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1056];
+    __local float lB[1056];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*64+ idx + idy*lda;
+    B +=  (gidy*64+idy)*ldb+ idx;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*65+idx;
+        __local float* plB = lB + idx*65+idy;
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+   
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+
+        M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+    C+= gidx*64+idx;
+    C+= gidy*64*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+    
+   
+}
+";
+
+
+static const char * sgemm_NN_64_64_16_16x16_4x4__ALPHA = "
+
+
+#define  M4x4 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            offA += 65;								  \
+            offB += 65;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_64_64_16_16x16_4x4__ALPHA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[4][4]  = {(float)0};
+    float rA[1][4];
+    float rB[1][4];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1056];
+    __local float lB[1056];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*64+ idx + idy*lda;
+    B +=  (gidy*64+idy)*ldb+ idx;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*65+idx;
+        __local float* plB = lB + idx*65+idy;
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+   
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+
+        M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+		M4x4
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+    C+= gidx*64+idx;
+    C+= gidy*64*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C[32*ldc] = alpha*rC[0][2];
+    C[48*ldc] = alpha*rC[0][3];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    C[32*ldc] = alpha*rC[1][2];
+    C[48*ldc] = alpha*rC[1][3];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[16*ldc] = alpha*rC[2][1];
+    C[32*ldc] = alpha*rC[2][2];
+    C[48*ldc] = alpha*rC[2][3];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[16*ldc] = alpha*rC[3][1];
+    C[32*ldc] = alpha*rC[3][2];
+    C[48*ldc] = alpha*rC[3][3];
+    
+   
+}
+";
+
+
+
+
+
+
+
+
+/************************************************************************************/
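+// TN kernels: A is transposed, so both A and B start at a column offset
+// (gid*96*ld) and are read with a stride of ld; both tiles are staged
+// transposed into local memory (plA/plB = lA/lB + idxT*97 + idyT) and both
+// pointers advance by 16 along K each iteration.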
+static const char * sgemm_TN_96_96_16_16x16_6x6__ALPHABETA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_TN_96_96_16_16x16_6x6__ALPHABETA( __global float const * restrict A,
+                                                     __global float const * restrict B,
+                                                     __global float * C,
+                                                     uint const M,
+                                                     uint const N,
+                                                     uint const K,
+                                                     float const alpha,
+                                                     float const beta,
+                                                     uint lda,
+                                                     uint ldb,
+                                                     uint ldc,
+                                                     uint offsetA,
+                                                     uint offsetB,
+                                                     uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  uint idt = 16*idy + idx;
+  uint idxT = idt % 16;
+  uint idyT = idt / 16;
+
+  A +=  gidx*96*lda+ idxT + idyT*lda;
+  B +=  gidy*96*ldb+ idxT + idyT*ldb;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idxT*97+idyT;
+    __local float* plB = lB + idxT*97+idyT;
+    // barrier(CLK_LOCAL_MEM_FENCE);
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+    plB[64] = B[64*ldb];
+    plB[80] = B[80*ldb];
+
+    plA[0] = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+    plA[64] = A[64*lda];
+    plA[80] = A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+    //}
+  } while (--block_k > 0);
+
+  C+= gidx*96+idx;
+  C+= gidy*96*ldc;
+  C+= idy*ldc;
+
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+
+}
+";
+
+
+static const char * sgemm_TN_96_96_16_16x16_6x6__ALPHA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_TN_96_96_16_16x16_6x6__ALPHA( __global float const * restrict A,
+                                                     __global float const * restrict B,
+                                                     __global float * C,
+                                                     uint const M,
+                                                     uint const N,
+                                                     uint const K,
+                                                     float const alpha,
+                                                     uint lda,
+                                                     uint ldb,
+                                                     uint ldc,
+                                                     uint offsetA,
+                                                     uint offsetB,
+                                                     uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  uint idt = 16*idy + idx;
+  uint idxT = idt % 16;
+  uint idyT = idt / 16;
+
+  A +=  gidx*96*lda+ idxT + idyT*lda;
+  B +=  gidy*96*ldb+ idxT + idyT*ldb;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idxT*97+idyT;
+    __local float* plB = lB + idxT*97+idyT;
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+    plB[64] = B[64*ldb];
+    plB[80] = B[80*ldb];
+
+    plA[0] = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+    plA[64] = A[64*lda];
+    plA[80] = A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+  C+= gidx*96+idx;
+  C+= gidy*96*ldc;
+  C+= idy*ldc;
+
+  C[0*ldc] = alpha*rC[0][0] ;
+  C[16*ldc] = alpha*rC[0][1];
+  C[32*ldc] = alpha*rC[0][2];
+  C[48*ldc] = alpha*rC[0][3];
+  C[64*ldc] = alpha*rC[0][4];
+  C[80*ldc] = alpha*rC[0][5];
+  C+=16;
+  C[0*ldc] = alpha*rC[1][0] ;
+  C[16*ldc] = alpha*rC[1][1];
+  C[32*ldc] = alpha*rC[1][2];
+  C[48*ldc] = alpha*rC[1][3];
+  C[64*ldc] = alpha*rC[1][4];
+  C[80*ldc] = alpha*rC[1][5];
+  C+=16;
+  C[0*ldc] = alpha*rC[2][0] ;
+  C[16*ldc] = alpha*rC[2][1];
+  C[32*ldc] = alpha*rC[2][2];
+  C[48*ldc] = alpha*rC[2][3];
+  C[64*ldc] = alpha*rC[2][4];
+  C[80*ldc] = alpha*rC[2][5];
+  C+=16;
+  C[0*ldc] = alpha*rC[3][0] ;
+  C[16*ldc] = alpha*rC[3][1];
+  C[32*ldc] = alpha*rC[3][2];
+  C[48*ldc] = alpha*rC[3][3];
+  C[64*ldc] = alpha*rC[3][4];
+  C[80*ldc] = alpha*rC[3][5];
+  C+=16;
+  C[0*ldc] = alpha*rC[4][0] ;
+  C[16*ldc] = alpha*rC[4][1];
+  C[32*ldc] = alpha*rC[4][2];
+  C[48*ldc] = alpha*rC[4][3];
+  C[64*ldc] = alpha*rC[4][4];
+  C[80*ldc] = alpha*rC[4][5];
+  C+=16;
+  C[0*ldc] = alpha*rC[5][0] ;
+  C[16*ldc] = alpha*rC[5][1];
+  C[32*ldc] = alpha*rC[5][2];
+  C[48*ldc] = alpha*rC[5][3];
+  C[64*ldc] = alpha*rC[5][4];
+  C[80*ldc] = alpha*rC[5][5];
+
+}
+";
+
+
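+// 64x64 TN counterparts of the kernels above, using the same transposed
+// staging with a 65-element local row stride.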
+static const char * sgemm_TN_64_64_16_16x16_4x4__ALPHABETA = "
+
+#define  M4x4 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            offA += 65;								  \
+            offB += 65;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+			      barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_TN_64_64_16_16x16_4x4__ALPHABETA( __global float const * restrict A,
+                                                     __global float const * restrict B,
+                                                     __global float * C,
+                                                     uint const M,
+                                                     uint const N,
+                                                     uint const K,
+                                                     float const alpha,
+                                                     float const beta,
+                                                     uint lda,
+                                                     uint ldb,
+                                                     uint ldc,
+                                                     uint offsetA,
+                                                     uint offsetB,
+                                                     uint offsetC)
+{
+  float rC[4][4]  = {(float)0};
+  float rA[1][4];
+  float rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1056];
+  __local float lB[1056];
+
+  uint gidx = get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  uint idt = 16*idy + idx;
+  uint idxT = idt % 16;
+  uint idyT = idt / 16;
+
+  A +=  gidx*64*lda+ idxT + idyT*lda;
+  B +=  gidy*64*ldb+ idxT + idyT*ldb;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idxT*65+idyT;
+    __local float* plB = lB + idxT*65+idyT;
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+
+    plA[0] = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+
+    A += 16;
+    B += 16;
+    
+  } while (--block_k > 0);
+
+  C+= gidx*64+idx;
+  C+= gidy*64*ldc;
+  C+= idy*ldc;
+
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+
+  C+=16;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+
+  C+=16;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+
+  C+=16;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+}
+";
+
+
+
+static const char * sgemm_TN_64_64_16_16x16_4x4__ALPHA = "
+
+#define  M4x4 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            offA += 65;								  \
+            offB += 65;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+			      barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_TN_64_64_16_16x16_4x4__ALPHA( __global float const * restrict A,
+                                                     __global float const * restrict B,
+                                                     __global float * C,
+                                                     uint const M,
+                                                     uint const N,
+                                                     uint const K,
+                                                     float const alpha,
+                                                     uint lda,
+                                                     uint ldb,
+                                                     uint ldc,
+                                                     uint offsetA,
+                                                     uint offsetB,
+                                                     uint offsetC)
+{
+  float rC[4][4]  = {(float)0};
+  float rA[1][4];
+  float rB[1][4];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1056];
+  __local float lB[1056];
+
+  uint gidx = get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  uint idt = 16*idy + idx;
+  uint idxT = idt % 16;
+  uint idyT = idt / 16;
+
+  A +=  gidx*64*lda+ idxT + idyT*lda;
+  B +=  gidy*64*ldb+ idxT + idyT*ldb;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idxT*65+idyT;
+    __local float* plB = lB + idxT*65+idyT;
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+
+    plA[0] = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+    M4x4
+
+    A += 16;
+    B += 16;
+    
+  } while (--block_k > 0);
+
+  C+= gidx*64+idx;
+  C+= gidy*64*ldc;
+  C+= idy*ldc;
+
+  C[0*ldc] = alpha*rC[0][0]  ;
+  C[16*ldc] = alpha*rC[0][1] ;
+  C[32*ldc] = alpha*rC[0][2] ;
+  C[48*ldc] = alpha*rC[0][3] ;
+
+  C+=16;
+  C[0*ldc] = alpha*rC[1][0] ; 
+  C[16*ldc] = alpha*rC[1][1] ;
+  C[32*ldc] = alpha*rC[1][2] ;
+  C[48*ldc] = alpha*rC[1][3] ;
+
+  C+=16;
+  C[0*ldc] = alpha*rC[2][0]  ;
+  C[16*ldc] = alpha*rC[2][1] ;
+  C[32*ldc] = alpha*rC[2][2] ;
+  C[48*ldc] = alpha*rC[2][3] ;
+
+  C+=16;
+  C[0*ldc] = alpha*rC[3][0]  ;
+  C[16*ldc] = alpha*rC[3][1] ;
+  C[32*ldc] = alpha*rC[3][2] ;
+  C[48*ldc] = alpha*rC[3][3] ;
+}
+";
diff --git a/src/library/blas/gens/clTemplates/sgemm_gcn_SmallMatrices.cl b/src/library/blas/gens/clTemplates/sgemm_gcn_SmallMatrices.cl
new file mode 100644
index 0000000..73d9dc3
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/sgemm_gcn_SmallMatrices.cl
@@ -0,0 +1,786 @@
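+// Small-matrix sgemm kernels: same structure as the 96x96/64x64 GCN kernels
+// above, but with 32x32 C tiles and 2x2 register micro-tiles per work-item,
+// presumably for problem sizes where the larger tiles would leave the device
+// under-occupied.
+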
+static const char * sgemm_NT_32_32_16_16x16_2x2__ALPHABETA = "
+
+#define  M2x2 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            offA += 33;								  \
+            offB += 33;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NT_32_32_16_16x16_2x2__ALPHABETA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[2][2]  = {(float)0};
+    float rA[1][2];
+    float rB[1][2];
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[544];
+    __local float lB[544];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  gidy*32+ idxT + idyT*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idyT*33+idxT;
+        __local float* plB = lB + idyT*33+idxT;
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+               
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*32+idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    
+}
+
+";
+
+
+
+static const char * sgemm_NT_32_32_16_16x16_2x2__ALPHA = "
+
+
+#define  M2x2 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            offA += 33;								  \
+            offB += 33;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NT_32_32_16_16x16_2x2__ALPHA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[2][2]  = {(float)0};
+    float rA[1][2];
+    float rB[1][2];
+
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[544];
+    __local float lB[544];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  gidy*32+ idxT + idyT*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idyT*33+idxT;
+        __local float* plB = lB + idyT*33+idxT;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+//        #pragma unroll 1
+//        for(unsigned int k = 0 ; k < 16; k+=1){
+//        }
+
+        M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+		M2x2
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*32+idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C+=16;					  
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    
+}
+
+";
+
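+// The _BRANCH variant adds bounds checks so the 32x32 tile does not have to
+// divide M and N exactly: out-of-range loads are replaced with 0.0 and the
+// store loop exits early once the row/column offset passes M or N (K is
+// still consumed in blocks of 16). It also ends the M2x2 macro with
+// mem_fence(CLK_LOCAL_MEM_FENCE) instead of a full barrier.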
+static const char * sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH = "
+
+#define  M2x2 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            offA += 33;								  \
+            offB += 33;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+			mem_fence(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NT_32_32_16_16x16_2x2__ALPHABETA_BRANCH( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[2][2]  = {(float)0};
+    float rA[1][2];
+    float rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[528];//16*32+16
+    __local float lB[528];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*32+ idx;
+	int CurrentOffSetB = gidy*32+ idx;
+    
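+    // BRANCH variant: loads past the matrix edges are replaced by zeros when
+    // staging into local memory, so tiles that straddle M or N can run the
+    // same unrolled inner loop as full tiles.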
+    A +=  gidx*32+ idx + idy*lda;
+    B +=  gidy*32+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*33+idx;
+        __local float* plB = lB + idy*33+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+        M2x2
+
+        A += lda<<4;
+        B += ldb<<4;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*32+idx;
+    int offset_y = gidy*32+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+
+	    
+	}
+    while (++i < 2);
+    
+}
+
+";
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static const char * sgemm_NN_32_32_16_16x16_2x2__ALPHABETA = "
+
+#define  M2x2 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            offA += 33;								  \
+            offB += 33;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NN_32_32_16_16x16_2x2__ALPHABETA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[2][2]  = {(float)0};
+    float rA[1][2];
+    float rB[1][2];
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[544];
+    __local float lB[544];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  (gidy*32+idyT)*ldb + idxT;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	  {
+
+      __local float* plA = lA + idyT*33+idxT;
+      __local float* plB = lB + idxT*33+idyT;
+      plB[0] = B[0];
+      plB[16] = B[16*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+      plA[16] = A[16+0*lda];
+              
+      barrier(CLK_LOCAL_MEM_FENCE);
+      uint offA = idx;
+      uint offB = idy;
+
+
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+
+      A += lda<<4;
+      B += 16;
+	  } while (--block_k > 0);
+
+    C+= gidx*32+idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+}
+
+";
+
+
+
+static const char * sgemm_NN_32_32_16_16x16_2x2__ALPHA = "
+
+#define  M2x2 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            offA += 33;								  \
+            offB += 33;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_NN_32_32_16_16x16_2x2__ALPHA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[2][2]  = {(float)0};
+    float rA[1][2];
+    float rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[544];
+    __local float lB[544];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  gidx*32+ idxT + idyT*lda;
+    B +=  (gidy*32+idyT)*ldb + idxT;
+    
+   
+  uint block_k = K >> 4;
+  do 
+	{
+    __local float* plA = lA + idyT*33+idxT;
+    __local float* plB = lB + idxT*33+idyT;
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+        
+	  plA[0] = A[0+0*lda];
+    plA[16] = A[16+0*lda];
+       
+        
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+
+    A += lda<<4;
+    B += 16;
+   
+	} while (--block_k > 0);
+
+    C+= gidx*32+idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0];
+    C[16*ldc] = alpha*rC[0][1];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0];
+    C[16*ldc] = alpha*rC[1][1];
+}
+
+";
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static const char * sgemm_TN_32_32_16_16x16_2x2__ALPHABETA = "
+
+#define  M2x2 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            offA += 33;								  \
+            offB += 33;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_TN_32_32_16_16x16_2x2__ALPHABETA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[2][2]  = {(float)0};
+    float rA[1][2];
+    float rB[1][2];
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[544];
+    __local float lB[544];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A += (gidx*32+idyT)*lda+ idxT ;
+    B += (gidy*32+idyT)*ldb + idxT;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	  {
+
+      __local float* plA = lA + idxT*33+idyT;
+      __local float* plB = lB + idxT*33+idyT;
+      plB[0] = B[0];
+      plB[16] = B[16*ldb];
+	   
+	    plA[0] = A[0];
+      plA[16] = A[16*lda];
+              
+      barrier(CLK_LOCAL_MEM_FENCE);
+      uint offA = idx;
+      uint offB = idy;
+
+
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+      M2x2
+
+      A += 16;
+      B += 16;
+	  } while (--block_k > 0);
+
+    C+= gidx*32+idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+}
+
+";
+
+
+
+static const char * sgemm_TN_32_32_16_16x16_2x2__ALPHA = "
+
+#define  M2x2 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            offA += 33;								  \
+            offB += 33;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+
+__kernel void sgemm_TN_32_32_16_16x16_2x2__ALPHA( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[2][2]  = {(float)0};
+    float rA[1][2];
+    float rB[1][2];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[544];
+    __local float lB[544];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    uint idt = 16*idy + idx;
+    uint idxT = idt % 16;
+    uint idyT = idt / 16;
+    
+    A +=  (gidx*32+idyT)*lda+ idxT ;
+    B +=  (gidy*32+idyT)*ldb + idxT;
+    
+   
+  uint block_k = K >> 4;
+  do 
+	{
+    __local float* plA = lA + idxT*33+idyT;
+    __local float* plB = lB + idxT*33+idyT;
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+        
+	  plA[0] = A[0];
+    plA[16] = A[16*lda];
+       
+        
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+    M2x2
+
+    A += 16;
+    B += 16;
+   
+	} while (--block_k > 0);
+
+    C+= gidx*32+idx;
+    C+= gidy*32*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0];
+    C[16*ldc] = alpha*rC[0][1];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0];
+    C[16*ldc] = alpha*rC[1][1];
+}
+
+";
+
diff --git a/src/library/blas/gens/clTemplates/sgemm_hawaiiSplitKernel.cl b/src/library/blas/gens/clTemplates/sgemm_hawaiiSplitKernel.cl
new file mode 100644
index 0000000..9489538
--- /dev/null
+++ b/src/library/blas/gens/clTemplates/sgemm_hawaiiSplitKernel.cl
@@ -0,0 +1,6158 @@
+static const char * sgemm_NT_16_SPLIT__ALPHABETA = "
+
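+// Each work-item accumulates a 6x6 register tile, so a 16x16 work-group
+// covers a 96x96 block of C. One M6x6 step consumes a single k-slice from
+// local memory; offA/offB advance by 97, the padded row stride (96 + 1).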
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			mem_fence(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_96_16_16x16_6x6__ALPHABETA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+//        #pragma unroll 1
+//        for(unsigned int k = 0 ; k < 16; k+=1){
+//        }
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+   
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_96_16_16x16_6x6__ALPHABETA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
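+    // ROW variant of the split: covers the partial block of rows at the
+    // bottom edge of C. The block index along M is pinned to the last
+    // (partial) 96-row block, M/96; columns still come from get_group_id(1).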
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+	int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+      C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+      C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+      C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
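+// COLUMN variant: covers the partial block of columns at the right edge of C.
+// Loads from B and stores to C are guarded against N; loads from A need no
+// guard because this block spans a complete range of rows.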
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_1_16_16x16_6x6__ALPHABETA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	      plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+  C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+  do 
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	    
+	}
+    while (++i < 6);
+}
+
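+// SINGLE variant: the one work-group covering both the partial row block and
+// the partial column block (the bottom-right corner of C); loads from A and B
+// and stores to C are all bounds-checked against M and N.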
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_1_16_16x16_6x6__ALPHABETA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+
+	    
+	}
+    while (++i < 6);
+}
+";
+
+static const char * sgemm_NT_16_SPLIT__ALPHA = "
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_96_16_16x16_6x6__ALPHA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+//        #pragma unroll 1
+//        for(unsigned int k = 0 ; k < 16; k+=1){
+//        }
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0];
+    C[16*ldc] = alpha*rC[0][1];
+    C[32*ldc] = alpha*rC[0][2];
+    C[48*ldc] = alpha*rC[0][3];
+    C[64*ldc] = alpha*rC[0][4];
+    C[80*ldc] = alpha*rC[0][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0];
+    C[16*ldc] = alpha*rC[1][1];
+    C[32*ldc] = alpha*rC[1][2];
+    C[48*ldc] = alpha*rC[1][3];
+    C[64*ldc] = alpha*rC[1][4];
+    C[80*ldc] = alpha*rC[1][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0];
+    C[16*ldc] = alpha*rC[2][1];
+    C[32*ldc] = alpha*rC[2][2];
+    C[48*ldc] = alpha*rC[2][3];
+    C[64*ldc] = alpha*rC[2][4];
+    C[80*ldc] = alpha*rC[2][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0];
+    C[16*ldc] = alpha*rC[3][1];
+    C[32*ldc] = alpha*rC[3][2];
+    C[48*ldc] = alpha*rC[3][3];
+    C[64*ldc] = alpha*rC[3][4];
+    C[80*ldc] = alpha*rC[3][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0];
+    C[16*ldc] = alpha*rC[4][1];
+    C[32*ldc] = alpha*rC[4][2];
+    C[48*ldc] = alpha*rC[4][3];
+    C[64*ldc] = alpha*rC[4][4];
+    C[80*ldc] = alpha*rC[4][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0];
+    C[16*ldc] = alpha*rC[5][1];
+    C[32*ldc] = alpha*rC[5][2];
+    C[48*ldc] = alpha*rC[5][3];
+    C[64*ldc] = alpha*rC[5][4];
+    C[80*ldc] = alpha*rC[5][5];
+   
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_96_16_16x16_6x6__ALPHA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+	int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+      C[16*ldc] = alpha * rC[i][1];
+      C[32*ldc] = alpha * rC[i][2];
+      C[48*ldc] = alpha * rC[i][3];
+      C[64*ldc] = alpha * rC[i][4];
+      C[80*ldc] = alpha * rC[i][5];
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_1_16_16x16_6x6__ALPHA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	    plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+	  if(offset_y+16<N)
+        C[16*ldc] = alpha * rC[i][1];
+      if(offset_y+32<N)
+        C[32*ldc] = alpha * rC[i][2];
+      if(offset_y+48<N)
+        C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)
+        C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)
+        C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	    
+	}
+    while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_1_16_16x16_6x6__ALPHA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+        M6x6
+
+        A += lda<<4;
+        B += ldb<<4;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+	  if(offset_y+16<N)
+        C[16*ldc] = alpha * rC[i][1];
+      if(offset_y+32<N)
+        C[32*ldc] = alpha * rC[i][2];
+      if(offset_y+48<N)
+        C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)
+        C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)
+        C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	    
+	}
+    while (++i < 6);
+}
+";
+
+
+
+
+
+
+
+
+
+
+
+
+/*********************************************************************************************************************/
+
+static const char * sgemm_NT_1_SPLIT__ALPHABETA = "
+
+
+
+
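+// The *_1_* kernels handle K that is not a multiple of 16: instead of a fixed
+// run of 16 macro steps per staged tile, the inner loop executes
+// min(16u, K - block_k) iterations, so only the valid remainder of the final
+// k-slice is accumulated.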
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_96_1_16x16_6x6__ALPHABETA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = 0;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+
+
+        A += lda<<4;
+        B += ldb<<4;
+        block_k+=16 ;
+    } while (block_k < K);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+    C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+   
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_96_1_16x16_6x6__ALPHABETA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+	int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = 0;//= K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+        A += lda<<4;
+        B += ldb<<4;
+	    block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+      C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+      C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+      C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_1_1_16x16_6x6__ALPHABETA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = 0;//K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	    plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+        A += lda<<4;
+        B += ldb<<4;
+	    block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	    
+	}
+    while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_1_1_16x16_6x6__ALPHABETA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
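+    // 16 k-slices of a 96-wide tile, padded to 97 floats per slice
+    // (16 * 97 = 1552), presumably to avoid local-memory bank conflicts
+    // in the stride-97 accesses below.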
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = 0;//K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
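+        // Zero-fill the local tiles for columns past N (B) and rows past M (A)
+        // so the unrolled multiply-add loop below needs no branches.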
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+        A += lda<<4;
+        B += ldb<<4;
+        block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+
+	    
+	}
+    while (++i < 6);
+}
+";
+
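+// Same NT split-kernel family as above, specialised for beta == 0: the epilogue
+// writes C = alpha * rC directly instead of accumulating into the old C.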
+static const char * sgemm_NT_1_SPLIT__ALPHA = "
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
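+// SPLIT_MAIN: interior work-groups only; the M and N tiles are full 96x96
+// blocks, so neither the loads nor the stores need bounds checks.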
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_96_1_16x16_6x6__ALPHA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k =0;// K >> 4;
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+        A += lda<<4;
+        B += ldb<<4;
+        block_k+=16;
+    } while (block_k < K);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0]  ;
+    C[16*ldc] = alpha*rC[0][1] ;
+    C[32*ldc] = alpha*rC[0][2] ;
+    C[48*ldc] = alpha*rC[0][3] ;
+    C[64*ldc] = alpha*rC[0][4] ;
+    C[80*ldc] = alpha*rC[0][5] ;
+    C+=16;					   
+    C[0*ldc] = alpha*rC[1][0]  ;
+    C[16*ldc] = alpha*rC[1][1] ;
+    C[32*ldc] = alpha*rC[1][2] ;
+    C[48*ldc] = alpha*rC[1][3] ;
+    C[64*ldc] = alpha*rC[1][4] ;
+    C[80*ldc] = alpha*rC[1][5] ;
+    C+=16;					   
+    C[0*ldc] = alpha*rC[2][0]  ;
+    C[16*ldc] = alpha*rC[2][1] ;
+    C[32*ldc] = alpha*rC[2][2] ;
+    C[48*ldc] = alpha*rC[2][3] ;
+    C[64*ldc] = alpha*rC[2][4] ;
+    C[80*ldc] = alpha*rC[2][5] ;
+    C+=16;					   
+    C[0*ldc] = alpha*rC[3][0]  ;
+    C[16*ldc] = alpha*rC[3][1] ;
+    C[32*ldc] = alpha*rC[3][2] ;
+    C[48*ldc] = alpha*rC[3][3] ;
+    C[64*ldc] = alpha*rC[3][4] ;
+    C[80*ldc] = alpha*rC[3][5] ;
+    C+=16;					   
+    C[0*ldc] = alpha*rC[4][0]  ;
+    C[16*ldc] = alpha*rC[4][1] ;
+    C[32*ldc] = alpha*rC[4][2] ;
+    C[48*ldc] = alpha*rC[4][3] ;
+    C[64*ldc] = alpha*rC[4][4] ;
+    C[80*ldc] = alpha*rC[4][5] ;
+    C+=16;					   
+    C[0*ldc] = alpha*rC[5][0]  ;
+    C[16*ldc] = alpha*rC[5][1] ;
+    C[32*ldc] = alpha*rC[5][2] ;
+    C[48*ldc] = alpha*rC[5][3] ;
+    C[64*ldc] = alpha*rC[5][4] ;
+    C[80*ldc] = alpha*rC[5][5] ;
+   
+}
+
+
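+// SPLIT_ROW: the partial row of tiles at the bottom edge (M not a multiple of
+// 96); A loads and C stores are guarded against running past M.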
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_96_1_16x16_6x6__ALPHA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+	int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = 0;//K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0] = B[0+0*ldb];
+        plB[16] = B[16+0*ldb];
+        plB[32] = B[32+0*ldb];
+        plB[48] = B[48+0*ldb];
+        plB[64] = B[64+0*ldb];
+        plB[80] = B[80+0*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+        A += lda<<4;
+        B += ldb<<4;
+		block_k+=16;
+    } while (block_k < K);
+
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+      C[16*ldc] = alpha * rC[i][1];
+      C[32*ldc] = alpha * rC[i][2];
+      C[48*ldc] = alpha * rC[i][3];
+      C[64*ldc] = alpha * rC[i][4];
+      C[80*ldc] = alpha * rC[i][5];
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
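+// SPLIT_COLUMN: the partial column of tiles at the right edge (N not a multiple
+// of 96); B loads and C stores are guarded against running past N.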
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_96_1_1_16x16_6x6__ALPHA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = 0;//K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	    plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+        A += lda<<4;
+        B += ldb<<4;
+        block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+	  if(offset_y+16<N)
+        C[16*ldc] = alpha * rC[i][1];
+      if(offset_y+32<N)
+        C[32*ldc] = alpha * rC[i][2];
+      if(offset_y+48<N)
+        C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)
+        C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)
+        C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	    
+	}
+    while (++i < 6);
+}
+
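+// SPLIT_SINGLE: the corner tile where both M and N are partial; both operands
+// are zero-padded on load and stores are bounds-checked in x and y.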
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NT_1_1_1_16x16_6x6__ALPHA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96+ idx + idy*ldb;
+    
+   
+    uint block_k = 0;// K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idy*97+idx;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+        A += lda<<4;
+        B += ldb<<4;
+		block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+	  if(offset_y+16<N)
+        C[16*ldc] = alpha * rC[i][1];
+      if(offset_y+32<N)
+        C[32*ldc] = alpha * rC[i][2];
+      if(offset_y+48<N)
+        C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)
+        C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)
+        C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	    
+	}
+    while (++i < 6);
+}
+";
+
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
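+// NN split kernels (B not transposed): rows of B are read with stride ldb and
+// transposed into local memory (plB = lB + idx*97 + idy). This "_16_" family
+// unrolls the inner loop into sixteen M6x6 steps with no remainder handling,
+// so it expects K to be a multiple of 16.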
+static const char * sgemm_NN_16_SPLIT__ALPHABETA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_96_16_16x16_6x6__ALPHABETA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	      plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+        M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+	      M6x6
+
+        A += lda<<4;
+        B += 16;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+   
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_96_16_16x16_6x6__ALPHABETA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+   int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	      plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+      C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+      C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+      C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_1_16_16x16_6x6__ALPHABETA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+    int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+	   
+	    plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	    
+	}
+    while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_1_16_16x16_6x6__ALPHABETA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+
+	    
+	}
+    while (++i < 6);
+}
+";
+
+
+
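+// beta == 0 counterpart of the NN "_16_" family above: identical tiling, but
+// the epilogue writes C = alpha * rC without reading the previous contents of C.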
+static const char * sgemm_NN_16_SPLIT__ALPHA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_96_16_16x16_6x6__ALPHA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+//        #pragma unroll 1
+//        for(unsigned int k = 0 ; k < 16; k+=1){
+//        }
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+    //}
+	} while (--block_k > 0);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C[32*ldc] = alpha*rC[0][2];
+    C[48*ldc] = alpha*rC[0][3];
+    C[64*ldc] = alpha*rC[0][4];
+    C[80*ldc] = alpha*rC[0][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    C[32*ldc] = alpha*rC[1][2];
+    C[48*ldc] = alpha*rC[1][3];
+    C[64*ldc] = alpha*rC[1][4];
+    C[80*ldc] = alpha*rC[1][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[16*ldc] = alpha*rC[2][1];
+    C[32*ldc] = alpha*rC[2][2];
+    C[48*ldc] = alpha*rC[2][3];
+    C[64*ldc] = alpha*rC[2][4];
+    C[80*ldc] = alpha*rC[2][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[16*ldc] = alpha*rC[3][1];
+    C[32*ldc] = alpha*rC[3][2];
+    C[48*ldc] = alpha*rC[3][3];
+    C[64*ldc] = alpha*rC[3][4];
+    C[80*ldc] = alpha*rC[3][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] ;
+    C[16*ldc] = alpha*rC[4][1];
+    C[32*ldc] = alpha*rC[4][2];
+    C[48*ldc] = alpha*rC[4][3];
+    C[64*ldc] = alpha*rC[4][4];
+    C[80*ldc] = alpha*rC[4][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] ;
+    C[16*ldc] = alpha*rC[5][1];
+    C[32*ldc] = alpha*rC[5][2];
+    C[48*ldc] = alpha*rC[5][3];
+    C[64*ldc] = alpha*rC[5][4];
+    C[80*ldc] = alpha*rC[5][5];
+   
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_96_16_16x16_6x6__ALPHA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+	int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+      C[16*ldc] = alpha * rC[i][1];
+      C[32*ldc] = alpha * rC[i][2];
+      C[48*ldc] = alpha * rC[i][3];
+      C[64*ldc] = alpha * rC[i][4];
+      C[80*ldc] = alpha * rC[i][5];
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_1_16_16x16_6x6__ALPHA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+	   
+	    plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0] ;
+	  if(offset_y+16<N)
+        C[16*ldc] = alpha * rC[i][1];
+      if(offset_y+32<N)
+        C[32*ldc] = alpha * rC[i][2];
+      if(offset_y+48<N)
+        C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)
+        C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)
+        C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	    
+	}
+    while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_1_16_16x16_6x6__ALPHA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = K >> 4;
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+		M6x6
+
+        A += lda<<4;
+        B += 16;
+	} while (--block_k > 0);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0] ;
+	  if(offset_y+16<N)				   
+        C[16*ldc] = alpha * rC[i][1];
+      if(offset_y+32<N)		 	    
+        C[32*ldc] = alpha * rC[i][2];
+      if(offset_y+48<N)		 	    
+        C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)		 	    
+        C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)		    
+        C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+
+	    
+	}
+    while (++i < 6);
+}
+";
+
+
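+// NN "_1_" family: same tiling as the NN "_16_" kernels but, like the NT "_1_"
+// kernels above, apparently intended for K values that are not a multiple of 16.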
+static const char * sgemm_NN_1_SPLIT__ALPHABETA = "
+
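+// M6x6 is one k-step of the 6x6 outer product: read six A values and six B
+// values for the current k index from local memory, accumulate the 36
+// products into rC with mad(), step offA/offB by 97 (one padded k-slice) and
+// barrier.  The kernels in this string spell the same body out inside a
+// bounded loop (so the tail of K can stop early); the macro itself appears
+// to go unused here.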
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_96_1_16x16_6x6__ALPHABETA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
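+    // _MAIN: full 96x96 tile in both M and N, so the loads and the C writes
+    // below carry no edge guards.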
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    int block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+	{
+   // for(unsigned int block_k=0 ; block_k< K ; block_k+=16)
+	//{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+       // barrier(CLK_LOCAL_MEM_FENCE);
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	    plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+
+
+        A += lda<<4;
+        B += 16;
+        block_k+=16;
+    } while (block_k < K);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+    C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+    C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+    C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+    C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+    C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+   
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_96_1_16x16_6x6__ALPHABETA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
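+    // _ROW: gidx is pinned to the last row-block (M/96); the A loads and the
+    // C writes are guarded against M.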
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+	int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    int block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+
+        A += lda<<4;
+        B += 16;
+        block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+      C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+      C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+      C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_1_1_16x16_6x6__ALPHABETA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
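+    // _COLUMN: gidy is pinned to the last column-block (N/96); the B loads
+    // and the C writes are guarded against N.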
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    int block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+	   
+	    plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+
+        A += lda<<4;
+        B += 16;
+        block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	    
+	}
+    while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_1_1_16x16_6x6__ALPHABETA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
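+    // _SINGLE: bottom-right corner tile; both the A/B loads and the C writes
+    // are guarded against M and N.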
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    int block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+
+	  // plB[0]  = (CurrentOffSetB>=N || idx>=M)?0.0:B[0];
+	  // plB[16] = (CurrentOffSetB+16>=N || idx>=M)?0.0:B[16*ldb];
+	  // plB[32] = (CurrentOffSetB+32>=N || idx>=M)?0.0:B[32*ldb];
+	  // plB[48] = (CurrentOffSetB+48>=N || idx>=M)?0.0:B[48*ldb];
+	  // plB[64] = (CurrentOffSetB+64>=N || idx>=M)?0.0:B[64*ldb];
+	  // plB[80] = (CurrentOffSetB+80>=N || idx>=M)?0.0:B[80*ldb];
+
+	   
+	    plA[0]  = (CurrentOffSetA>=M )?0.0:A[0];
+        plA[16] = (CurrentOffSetA+16>=M )?0.0:A[16];
+        plA[32] = (CurrentOffSetA+32>=M )?0.0:A[32];
+        plA[48] = (CurrentOffSetA+48>=M )?0.0:A[48];
+        plA[64] = (CurrentOffSetA+64>=M )?0.0:A[64];
+        plA[80] = (CurrentOffSetA+80>=M )?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+        A += lda<<4;
+        B += 16;
+		block_k+=16;
+	}  while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+	  if(offset_y+16<N)
+        C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+      if(offset_y+32<N)
+        C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+      if(offset_y+48<N)
+        C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+	  if(offset_y+64<N)
+        C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+	  if(offset_y+80<N)
+        C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+
+	    
+	}
+    while (++i < 6);
+}
+";
+
+
+
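+/* Same NN split family as above, but for the beta == 0 case: the epilogues
+ * overwrite C with alpha*rC instead of accumulating into it. */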
+static const char * sgemm_NN_1_SPLIT__ALPHA = "
+
+#define  M6x6 \
+            rA[0][0] = lA[offA + 0];				  \
+            rA[0][1] = lA[offA + 16];				  \
+            rA[0][2] = lA[offA + 32];				  \
+            rA[0][3] = lA[offA + 48];				  \
+            rA[0][4] = lA[offA + 64];				  \
+            rA[0][5] = lA[offA + 80];				  \
+            rB[0][0] = lB[offB + 0];				  \
+            rB[0][1] = lB[offB + 16];				  \
+            rB[0][2] = lB[offB + 32];				  \
+            rB[0][3] = lB[offB + 48];				  \
+            rB[0][4] = lB[offB + 64];				  \
+            rB[0][5] = lB[offB + 80];				  \
+            offA += 97;								  \
+            offB += 97;								  \
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_96_1_16x16_6x6__ALPHA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+    {
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+      
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+        plA[0] = A[0+0*lda];
+        plA[16] = A[16+0*lda];
+        plA[32] = A[32+0*lda];
+        plA[48] = A[48+0*lda];
+        plA[64] = A[64+0*lda];
+        plA[80] = A[80+0*lda];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+        {
+
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+            barrier(CLK_LOCAL_MEM_FENCE);
+        }
+
+
+
+        A += lda<<4;
+        B += 16;
+        block_k+=16;
+    } while (block_k < K);
+
+    C+= gidx*96+idx;
+    C+= gidy*96*ldc;
+    C+= idy*ldc;
+    
+	C[0*ldc] = alpha*rC[0][0] ;
+    C[16*ldc] = alpha*rC[0][1];
+    C[32*ldc] = alpha*rC[0][2];
+    C[48*ldc] = alpha*rC[0][3];
+    C[64*ldc] = alpha*rC[0][4];
+    C[80*ldc] = alpha*rC[0][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[1][0] ;
+    C[16*ldc] = alpha*rC[1][1];
+    C[32*ldc] = alpha*rC[1][2];
+    C[48*ldc] = alpha*rC[1][3];
+    C[64*ldc] = alpha*rC[1][4];
+    C[80*ldc] = alpha*rC[1][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[2][0] ;
+    C[16*ldc] = alpha*rC[2][1];
+    C[32*ldc] = alpha*rC[2][2];
+    C[48*ldc] = alpha*rC[2][3];
+    C[64*ldc] = alpha*rC[2][4];
+    C[80*ldc] = alpha*rC[2][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[3][0] ;
+    C[16*ldc] = alpha*rC[3][1];
+    C[32*ldc] = alpha*rC[3][2];
+    C[48*ldc] = alpha*rC[3][3];
+    C[64*ldc] = alpha*rC[3][4];
+    C[80*ldc] = alpha*rC[3][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[4][0] ;
+    C[16*ldc] = alpha*rC[4][1];
+    C[32*ldc] = alpha*rC[4][2];
+    C[48*ldc] = alpha*rC[4][3];
+    C[64*ldc] = alpha*rC[4][4];
+    C[80*ldc] = alpha*rC[4][5];
+    C+=16;
+    C[0*ldc] = alpha*rC[5][0] ;
+    C[16*ldc] = alpha*rC[5][1];
+    C[32*ldc] = alpha*rC[5][2];
+    C[48*ldc] = alpha*rC[5][3];
+    C[64*ldc] = alpha*rC[5][4];
+    C[80*ldc] = alpha*rC[5][5];
+   
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_96_1_16x16_6x6__ALPHA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+
+	int CurrentOffSetA = gidx*96+ idx;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0] = B[0];
+        plB[16] = B[16*ldb];
+        plB[32] = B[32*ldb];
+        plB[48] = B[48*ldb];
+        plB[64] = B[64*ldb];
+        plB[80] = B[80*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+
+
+        A += lda<<4;
+        B += 16;
+        block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+	if(offset_x>=M )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0];
+      C[16*ldc] = alpha * rC[i][1];
+      C[32*ldc] = alpha * rC[i][2];
+      C[48*ldc] = alpha * rC[i][3];
+      C[64*ldc] = alpha * rC[i][4];
+      C[80*ldc] = alpha * rC[i][5];
+      C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+	}
+    while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_96_1_1_16x16_6x6__ALPHA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+	   
+        plA[0]  = A[0];
+        plA[16] = A[16];
+        plA[32] = A[32];
+        plA[48] = A[48];
+        plA[64] = A[64];
+        plA[80] = A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+        {
+
+            rA[0][0] = lA[offA + 0];
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+            barrier(CLK_LOCAL_MEM_FENCE);
+        }
+
+
+
+        A += lda<<4;
+        B += 16;
+        block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+	if(offset_y>=N )
+      return;
+
+  C+=offset_x+offset_y*ldc;
+    
+  int i = 0;
+  do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0] ;
+	  if(offset_y+16<N)
+      C[16*ldc] = alpha * rC[i][1];
+    if(offset_y+32<N)
+      C[32*ldc] = alpha * rC[i][2];
+    if(offset_y+48<N)
+      C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)
+      C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)
+      C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	    
+	}
+  while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+__kernel void sgemm_NN_1_1_1_16x16_6x6__ALPHA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+    float rC[6][6]  = {(float)0};
+    float rA[1][6];
+    float rB[1][6];
+    
+    
+    A += offsetA;
+    B += offsetB;
+    C+=offsetC;
+    
+    __local float lA[1552];
+    __local float lB[1552];
+    
+    uint gidx = M/96;//get_group_id(0);
+    uint gidy = N/96;//get_group_id(1);
+    uint idx = get_local_id(0);
+    uint idy = get_local_id(1);
+    
+	int CurrentOffSetA = gidx*96+ idx;
+	int CurrentOffSetB = gidy*96+ idy;
+    
+    A +=  gidx*96+ idx + idy*lda;
+    B +=  gidy*96*ldb+ idx + idy*ldb;
+    
+   
+    uint block_k = 0; // K is consumed in 16-wide blocks; the last, partial block is bounded by min(16u, K-block_k) below
+    do 
+	{
+        __local float* plA = lA + idy*97+idx;
+        __local float* plB = lB + idx*97+idy;
+
+        plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+        plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+        plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+        plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+        plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+        plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+	   
+	    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+        plA[16] = CurrentOffSetA+16>=M?0.0:A[16];
+        plA[32] = CurrentOffSetA+32>=M?0.0:A[32];
+        plA[48] = CurrentOffSetA+48>=M?0.0:A[48];
+        plA[64] = CurrentOffSetA+64>=M?0.0:A[64];
+        plA[80] = CurrentOffSetA+80>=M?0.0:A[80];
+
+        
+        barrier(CLK_LOCAL_MEM_FENCE);
+        uint offA = idx;
+        uint offB = idy;
+
+
+        #pragma unroll 1
+        for(unsigned int k = 0 ; k < min(16u, K-block_k ); k+=1)
+	    {
+
+	        rA[0][0] = lA[offA + 0];				  
+            rA[0][1] = lA[offA + 16];				  
+            rA[0][2] = lA[offA + 32];				  
+            rA[0][3] = lA[offA + 48];				  
+            rA[0][4] = lA[offA + 64];				  
+            rA[0][5] = lA[offA + 80];				  
+            rB[0][0] = lB[offB + 0];				  
+            rB[0][1] = lB[offB + 16];				  
+            rB[0][2] = lB[offB + 32];				  
+            rB[0][3] = lB[offB + 48];				  
+            rB[0][4] = lB[offB + 64];				  
+            rB[0][5] = lB[offB + 80];				  
+            offA += 97;								  
+            offB += 97;								  
+            rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); 
+            rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); 
+            rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); 
+            rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); 
+            rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); 
+            rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); 
+            rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); 
+            rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); 
+            rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); 
+            rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); 
+            rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); 
+            rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); 
+            rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); 
+            rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); 
+            rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); 
+            rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); 
+            rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); 
+            rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); 
+            rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); 
+            rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); 
+            rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); 
+            rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); 
+            rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); 
+            rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); 
+            rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); 
+            rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); 
+            rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); 
+            rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); 
+            rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); 
+            rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); 
+            rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); 
+            rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); 
+            rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); 
+            rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); 
+            rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); 
+            rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); 
+			barrier(CLK_LOCAL_MEM_FENCE);
+
+
+        }
+
+
+
+        A += lda<<4;
+        B += 16;
+        block_k+=16;
+    } while (block_k < K);
+
+
+	int offset_x = gidx*96+idx;
+    int offset_y = gidy*96+ idy;
+
+    if(offset_x>=M || offset_y>=N )
+      return;
+
+    C+=offset_x+offset_y*ldc;
+    
+	int i = 0;
+    do 
+	//for (int i=0; i<6; i++)
+	{
+	  C[0     ] = alpha * rC[i][0] ;
+	  if(offset_y+16<N)				   
+        C[16*ldc] = alpha * rC[i][1];
+      if(offset_y+32<N)		 	    
+        C[32*ldc] = alpha * rC[i][2];
+      if(offset_y+48<N)		 	    
+        C[48*ldc] = alpha * rC[i][3];
+	  if(offset_y+64<N)		 	    
+        C[64*ldc] = alpha * rC[i][4];
+	  if(offset_y+80<N)		    
+        C[80*ldc] = alpha * rC[i][5];
+      
+	  C+=16;
+	  offset_x+=16;
+	  if(offset_x>=M )
+        return;
+
+	    
+	}
+    while (++i < 6);
+}
+";
+
+
+
+
+/*************************************************************************************************/
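+/* TN variants: A is transposed, so the A tile is fetched along lda
+ * (plA[16] = A[16*lda], ...) and the per-block pointer advance is A += 16
+ * rather than A += lda<<4.  The 16 M6x6 steps are unrolled with
+ * block_k = K >> 4, so these kernels presumably expect K to be a multiple
+ * of 16, matching the 16 in their names. */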
+static const char * sgemm_TN_16_SPLIT__ALPHABETA = "
+
+#define  M6x6 \
+  rA[0][0] = lA[offA + 0];				  \
+  rA[0][1] = lA[offA + 16];				  \
+  rA[0][2] = lA[offA + 32];				  \
+  rA[0][3] = lA[offA + 48];				  \
+  rA[0][4] = lA[offA + 64];				  \
+  rA[0][5] = lA[offA + 80];				  \
+  rB[0][0] = lB[offB + 0];				  \
+  rB[0][1] = lB[offB + 16];				  \
+  rB[0][2] = lB[offB + 32];				  \
+  rB[0][3] = lB[offB + 48];				  \
+  rB[0][4] = lB[offB + 64];				  \
+  rB[0][5] = lB[offB + 80];				  \
+  offA += 97;								  \
+  offB += 97;								  \
+  rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+  rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+  rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+  rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+  rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+  rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+  rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+  rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+  rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+  rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+  rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+  rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+  rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+  rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+  rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+  rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+  rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+  rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+  rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+  rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+  rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+  rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+  rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+  rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+  rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+  rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+  rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+  rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+  rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+  rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+  rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+  rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+  rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+  rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+  rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+  rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_96_96_16_16x16_6x6__ALPHABETA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+    plB[64] = B[64*ldb];
+    plB[80] = B[80*ldb];
+
+    plA[0] = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+    plA[64] = A[64*lda];
+    plA[80] = A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+  C+= gidx*96+idx;
+  C+= gidy*96*ldc;
+  C+= idy*ldc;
+
+  C[0*ldc] = alpha*rC[0][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[0][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[0][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[0][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[0][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[0][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[1][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[1][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[1][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[1][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[1][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[1][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[2][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[2][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[2][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[2][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[2][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[2][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[3][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[3][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[3][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[3][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[3][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[3][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[4][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[4][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[4][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[4][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[4][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[4][5] + beta*C[80*ldc];
+  C+=16;
+  C[0*ldc] = alpha*rC[5][0] + beta*C[0*ldc];
+  C[16*ldc] = alpha*rC[5][1] + beta*C[16*ldc];
+  C[32*ldc] = alpha*rC[5][2] + beta*C[32*ldc];
+  C[48*ldc] = alpha*rC[5][3] + beta*C[48*ldc];
+  C[64*ldc] = alpha*rC[5][4] + beta*C[64*ldc];
+  C[80*ldc] = alpha*rC[5][5] + beta*C[80*ldc];
+
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_1_96_16_16x16_6x6__ALPHABETA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = M/96;//get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+
+  int CurrentOffSetA = gidx*96+ idy;
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+    plB[64] = B[64*ldb];
+    plB[80] = B[80*ldb];
+
+    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+    plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+    plA[32] = CurrentOffSetA+32>=M?0.0:A[32*lda];
+    plA[48] = CurrentOffSetA+48>=M?0.0:A[48*lda];
+    plA[64] = CurrentOffSetA+64>=M?0.0:A[64*lda];
+    plA[80] = CurrentOffSetA+80>=M?0.0:A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+
+  int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+  if(offset_x>=M )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+  int i = 0;
+  do 
+  //for (int i=0; i<6; i++)
+  {
+    C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+    C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+    C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+    C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+    C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+    C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+  }
+  while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_96_1_16_16x16_6x6__ALPHABETA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = get_group_id(0);
+  uint gidy = N/96;//get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  int CurrentOffSetB = gidy*96+ idy;
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+    plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+    plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+    plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+    plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+    plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+
+    plA[0]  = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+    plA[64] = A[64*lda];
+    plA[80] = A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+
+  int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+  if(offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+  int i = 0;
+  do 
+  //for (int i=0; i<6; i++)
+  {
+    C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+    if(offset_y+16<N)
+      C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+    if(offset_y+32<N)
+      C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+    if(offset_y+48<N)
+      C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+    if(offset_y+64<N)
+      C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+    if(offset_y+80<N)
+      C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+
+    C+=16;
+
+  }
+  while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_1_1_16_16x16_6x6__ALPHABETA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  float const beta,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = M/96;//get_group_id(0);
+  uint gidy = N/96;//get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  int CurrentOffSetA = gidx*96+ idy;
+  int CurrentOffSetB = gidy*96+ idy;
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+    plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+    plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+    plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+    plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+    plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+
+    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+    plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+    plA[32] = CurrentOffSetA+32>=M?0.0:A[32*lda];
+    plA[48] = CurrentOffSetA+48>=M?0.0:A[48*lda];
+    plA[64] = CurrentOffSetA+64>=M?0.0:A[64*lda];
+    plA[80] = CurrentOffSetA+80>=M?0.0:A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+
+  int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+  int i = 0;
+  do 
+  //for (int i=0; i<6; i++)
+  {
+    C[0     ] = mad(alpha, rC[i][0], beta*C[0]);
+    if(offset_y+16<N)
+      C[16*ldc] = mad(alpha, rC[i][1], beta*C[16*ldc]);
+    if(offset_y+32<N)
+      C[32*ldc] = mad(alpha, rC[i][2], beta*C[32*ldc]);
+    if(offset_y+48<N)
+      C[48*ldc] = mad(alpha, rC[i][3], beta*C[48*ldc]);
+    if(offset_y+64<N)
+      C[64*ldc] = mad(alpha, rC[i][4], beta*C[64*ldc]);
+    if(offset_y+80<N)
+      C[80*ldc] = mad(alpha, rC[i][5], beta*C[80*ldc]);
+
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+
+
+  }
+  while (++i < 6);
+}
+";
+
+static const char * sgemm_TN_16_SPLIT__ALPHA = "
+
+#define  M6x6 \
+  rA[0][0] = lA[offA + 0];				  \
+  rA[0][1] = lA[offA + 16];				  \
+  rA[0][2] = lA[offA + 32];				  \
+  rA[0][3] = lA[offA + 48];				  \
+  rA[0][4] = lA[offA + 64];				  \
+  rA[0][5] = lA[offA + 80];				  \
+  rB[0][0] = lB[offB + 0];				  \
+  rB[0][1] = lB[offB + 16];				  \
+  rB[0][2] = lB[offB + 32];				  \
+  rB[0][3] = lB[offB + 48];				  \
+  rB[0][4] = lB[offB + 64];				  \
+  rB[0][5] = lB[offB + 80];				  \
+  offA += 97;								  \
+  offB += 97;								  \
+  rC[0][0]=mad(rA[0][0],rB[0][0],rC[0][0]); \
+  rC[1][0]=mad(rA[0][1],rB[0][0],rC[1][0]); \
+  rC[2][0]=mad(rA[0][2],rB[0][0],rC[2][0]); \
+  rC[3][0]=mad(rA[0][3],rB[0][0],rC[3][0]); \
+  rC[4][0]=mad(rA[0][4],rB[0][0],rC[4][0]); \
+  rC[5][0]=mad(rA[0][5],rB[0][0],rC[5][0]); \
+  rC[0][1]=mad(rA[0][0],rB[0][1],rC[0][1]); \
+  rC[1][1]=mad(rA[0][1],rB[0][1],rC[1][1]); \
+  rC[2][1]=mad(rA[0][2],rB[0][1],rC[2][1]); \
+  rC[3][1]=mad(rA[0][3],rB[0][1],rC[3][1]); \
+  rC[4][1]=mad(rA[0][4],rB[0][1],rC[4][1]); \
+  rC[5][1]=mad(rA[0][5],rB[0][1],rC[5][1]); \
+  rC[0][2]=mad(rA[0][0],rB[0][2],rC[0][2]); \
+  rC[1][2]=mad(rA[0][1],rB[0][2],rC[1][2]); \
+  rC[2][2]=mad(rA[0][2],rB[0][2],rC[2][2]); \
+  rC[3][2]=mad(rA[0][3],rB[0][2],rC[3][2]); \
+  rC[4][2]=mad(rA[0][4],rB[0][2],rC[4][2]); \
+  rC[5][2]=mad(rA[0][5],rB[0][2],rC[5][2]); \
+  rC[0][3]=mad(rA[0][0],rB[0][3],rC[0][3]); \
+  rC[1][3]=mad(rA[0][1],rB[0][3],rC[1][3]); \
+  rC[2][3]=mad(rA[0][2],rB[0][3],rC[2][3]); \
+  rC[3][3]=mad(rA[0][3],rB[0][3],rC[3][3]); \
+  rC[4][3]=mad(rA[0][4],rB[0][3],rC[4][3]); \
+  rC[5][3]=mad(rA[0][5],rB[0][3],rC[5][3]); \
+  rC[0][4]=mad(rA[0][0],rB[0][4],rC[0][4]); \
+  rC[1][4]=mad(rA[0][1],rB[0][4],rC[1][4]); \
+  rC[2][4]=mad(rA[0][2],rB[0][4],rC[2][4]); \
+  rC[3][4]=mad(rA[0][3],rB[0][4],rC[3][4]); \
+  rC[4][4]=mad(rA[0][4],rB[0][4],rC[4][4]); \
+  rC[5][4]=mad(rA[0][5],rB[0][4],rC[5][4]); \
+  rC[0][5]=mad(rA[0][0],rB[0][5],rC[0][5]); \
+  rC[1][5]=mad(rA[0][1],rB[0][5],rC[1][5]); \
+  rC[2][5]=mad(rA[0][2],rB[0][5],rC[2][5]); \
+  rC[3][5]=mad(rA[0][3],rB[0][5],rC[3][5]); \
+  rC[4][5]=mad(rA[0][4],rB[0][5],rC[4][5]); \
+  rC[5][5]=mad(rA[0][5],rB[0][5],rC[5][5]); \
+  barrier(CLK_LOCAL_MEM_FENCE);
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_96_96_16_16x16_6x6__ALPHA_SPLIT_MAIN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+    plB[64] = B[64*ldb];
+    plB[80] = B[80*ldb];
+
+    plA[0] = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+    plA[64] = A[64*lda];
+    plA[80] = A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+  C+= gidx*96+idx;
+  C+= gidy*96*ldc;
+  C+= idy*ldc;
+
+  C[0*ldc] = alpha*rC[0][0]  ;
+  C[16*ldc] = alpha*rC[0][1] ;
+  C[32*ldc] = alpha*rC[0][2] ;
+  C[48*ldc] = alpha*rC[0][3] ;
+  C[64*ldc] = alpha*rC[0][4] ;
+  C[80*ldc] = alpha*rC[0][5] ;
+  C+=16;
+  C[0*ldc] = alpha*rC[1][0]  ;
+  C[16*ldc] = alpha*rC[1][1] ;
+  C[32*ldc] = alpha*rC[1][2] ;
+  C[48*ldc] = alpha*rC[1][3] ;
+  C[64*ldc] = alpha*rC[1][4] ;
+  C[80*ldc] = alpha*rC[1][5] ;
+  C+=16;
+  C[0*ldc] = alpha*rC[2][0]  ;
+  C[16*ldc] = alpha*rC[2][1] ;
+  C[32*ldc] = alpha*rC[2][2] ;
+  C[48*ldc] = alpha*rC[2][3] ;
+  C[64*ldc] = alpha*rC[2][4] ;
+  C[80*ldc] = alpha*rC[2][5] ;
+  C+=16;
+  C[0*ldc] = alpha*rC[3][0]  ;
+  C[16*ldc] = alpha*rC[3][1] ;
+  C[32*ldc] = alpha*rC[3][2] ;
+  C[48*ldc] = alpha*rC[3][3] ;
+  C[64*ldc] = alpha*rC[3][4] ;
+  C[80*ldc] = alpha*rC[3][5] ;
+  C+=16;
+  C[0*ldc] = alpha*rC[4][0]  ;
+  C[16*ldc] = alpha*rC[4][1] ;
+  C[32*ldc] = alpha*rC[4][2] ;
+  C[48*ldc] = alpha*rC[4][3] ;
+  C[64*ldc] = alpha*rC[4][4] ;
+  C[80*ldc] = alpha*rC[4][5] ;
+  C+=16;
+  C[0*ldc] = alpha*rC[5][0]  ;
+  C[16*ldc] = alpha*rC[5][1] ;
+  C[32*ldc] = alpha*rC[5][2] ;
+  C[48*ldc] = alpha*rC[5][3] ;
+  C[64*ldc] = alpha*rC[5][4] ;
+  C[80*ldc] = alpha*rC[5][5] ;
+
+}
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_1_96_16_16x16_6x6__ALPHA_SPLIT_ROW( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = M/96;//get_group_id(0);
+  uint gidy = get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+
+  int CurrentOffSetA = gidx*96+ idy;
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0] = B[0];
+    plB[16] = B[16*ldb];
+    plB[32] = B[32*ldb];
+    plB[48] = B[48*ldb];
+    plB[64] = B[64*ldb];
+    plB[80] = B[80*ldb];
+
+    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+    plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+    plA[32] = CurrentOffSetA+32>=M?0.0:A[32*lda];
+    plA[48] = CurrentOffSetA+48>=M?0.0:A[48*lda];
+    plA[64] = CurrentOffSetA+64>=M?0.0:A[64*lda];
+    plA[80] = CurrentOffSetA+80>=M?0.0:A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+
+  int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+  if(offset_x>=M )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+  int i = 0;
+  do 
+  //for (int i=0; i<6; i++)
+  {
+    C[0     ] = mad(alpha, rC[i][0], 0);
+    C[16*ldc] = mad(alpha, rC[i][1], 0);
+    C[32*ldc] = mad(alpha, rC[i][2], 0);
+    C[48*ldc] = mad(alpha, rC[i][3], 0);
+    C[64*ldc] = mad(alpha, rC[i][4], 0);
+    C[80*ldc] = mad(alpha, rC[i][5], 0);
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+  }
+  while (++i < 6);
+}
+
+
+
+
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_96_1_16_16x16_6x6__ALPHA_SPLIT_COLUMN( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = get_group_id(0);
+  uint gidy = N/96;//get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  int CurrentOffSetB = gidy*96+ idy;
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+    plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+    plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+    plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+    plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+    plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+
+    plA[0]  = A[0];
+    plA[16] = A[16*lda];
+    plA[32] = A[32*lda];
+    plA[48] = A[48*lda];
+    plA[64] = A[64*lda];
+    plA[80] = A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+
+  int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+  if(offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+  int i = 0;
+  do 
+  //for (int i=0; i<6; i++)
+  {
+    C[0     ] = mad(alpha, rC[i][0], 0);
+    if(offset_y+16<N)
+      C[16*ldc] = mad(alpha, rC[i][1], 0);
+    if(offset_y+32<N)
+      C[32*ldc] = mad(alpha, rC[i][2], 0);
+    if(offset_y+48<N)
+      C[48*ldc] = mad(alpha, rC[i][3], 0);
+    if(offset_y+64<N)
+      C[64*ldc] = mad(alpha, rC[i][4], 0);
+    if(offset_y+80<N)
+      C[80*ldc] = mad(alpha, rC[i][5], 0);
+
+    C+=16;
+
+  }
+  while (++i < 6);
+}
+
+__attribute__((reqd_work_group_size(16,16,1)))
+  __kernel void sgemm_TN_1_1_16_16x16_6x6__ALPHA_SPLIT_SINGLE( __global float const * restrict A,
+  __global float const * restrict B,
+  __global float * C,
+  uint const M,
+  uint const N,
+  uint const K,
+  float const alpha,
+  uint lda,
+  uint ldb,
+  uint ldc,
+  uint offsetA,
+  uint offsetB,
+  uint offsetC)
+{
+  float rC[6][6]  = {(float)0};
+  float rA[1][6];
+  float rB[1][6];
+
+
+  A += offsetA;
+  B += offsetB;
+  C+=offsetC;
+
+  __local float lA[1552];
+  __local float lB[1552];
+
+  uint gidx = M/96;//get_group_id(0);
+  uint gidy = N/96;//get_group_id(1);
+  uint idx = get_local_id(0);
+  uint idy = get_local_id(1);
+
+  int CurrentOffSetA = gidx*96+ idy;
+  int CurrentOffSetB = gidy*96+ idy;
+
+  A +=  (gidx*96+idy)*lda + idx;
+  B +=  (gidy*96+idy)*ldb + idx;
+
+
+  uint block_k = K >> 4;
+  do 
+  {
+    __local float* plA = lA + idx*97+idy;
+    __local float* plB = lB + idx*97+idy;
+
+    plB[0]  = CurrentOffSetB>=N?0.0:B[0];
+    plB[16] = CurrentOffSetB+16>=N?0.0:B[16*ldb];
+    plB[32] = CurrentOffSetB+32>=N?0.0:B[32*ldb];
+    plB[48] = CurrentOffSetB+48>=N?0.0:B[48*ldb];
+    plB[64] = CurrentOffSetB+64>=N?0.0:B[64*ldb];
+    plB[80] = CurrentOffSetB+80>=N?0.0:B[80*ldb];
+
+    plA[0]  = CurrentOffSetA>=M?0.0:A[0];
+    plA[16] = CurrentOffSetA+16>=M?0.0:A[16*lda];
+    plA[32] = CurrentOffSetA+32>=M?0.0:A[32*lda];
+    plA[48] = CurrentOffSetA+48>=M?0.0:A[48*lda];
+    plA[64] = CurrentOffSetA+64>=M?0.0:A[64*lda];
+    plA[80] = CurrentOffSetA+80>=M?0.0:A[80*lda];
+
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+    uint offA = idx;
+    uint offB = idy;
+
+
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+    M6x6
+
+    A += 16;
+    B += 16;
+  } while (--block_k > 0);
+
+
+  int offset_x = gidx*96+idx;
+  int offset_y = gidy*96+ idy;
+
+  if(offset_x>=M || offset_y>=N )
+    return;
+
+  C+=offset_x+offset_y*ldc;
+
+  int i = 0;
+  do 
+  //for (int i=0; i<6; i++)
+  {
+    C[0     ] = mad(alpha, rC[i][0], 0);
+    if(offset_y+16<N)
+      C[16*ldc] = mad(alpha, rC[i][1], 0);
+    if(offset_y+32<N)
+      C[32*ldc] = mad(alpha, rC[i][2], 0);
+    if(offset_y+48<N)
+      C[48*ldc] = mad(alpha, rC[i][3], 0);
+    if(offset_y+64<N)
+      C[64*ldc] = mad(alpha, rC[i][4], 0);
+    if(offset_y+80<N)
+      C[80*ldc] = mad(alpha, rC[i][5], 0);
+
+    C+=16;
+    offset_x+=16;
+    if(offset_x>=M )
+      return;
+
+
+  }
+  while (++i < 6);
+}
+";
+
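The SPLIT kernels above all share the same geometry: a 16x16 work-group in which each work-item accumulates a 6x6 micro-tile of C, so one group covers a 96x96 tile; the MAIN variant handles the full tiles while the ROW, COLUMN and SINGLE variants pick up the ragged edges when M or N is not a multiple of 96. The sketch below shows how a host might size the NDRange for the MAIN variant; the helper name and the launch strategy are assumptions inferred from the kernel bodies, not something this hunk spells out.

    /* Sketch only: sizing the NDRange for the *_SPLIT_MAIN kernels above.
     * Assumption: one 16x16 work-group per full 96x96 tile of C (each
     * work-item owns a 6x6 micro-tile); the ROW/COLUMN/SINGLE kernels,
     * not sized here, cover the remaining border tiles. */
    #include <stddef.h>

    static void split_main_ndrange(size_t M, size_t N,
                                   size_t global[2], size_t local[2])
    {
        size_t tilesM = M / 96;   /* full tiles along the rows of C    */
        size_t tilesN = N / 96;   /* full tiles along the columns of C */
        local[0]  = 16;
        local[1]  = 16;
        global[0] = tilesM * 16;  /* get_group_id(0) == tile index in M */
        global[1] = tilesN * 16;  /* get_group_id(1) == tile index in N */
    }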
diff --git a/src/library/blas/include/clblas-internal.h b/src/library/blas/include/clblas-internal.h
index 7a9afcd..e9a2d42 100644
--- a/src/library/blas/include/clblas-internal.h
+++ b/src/library/blas/include/clblas-internal.h
@@ -130,6 +130,19 @@ typedef struct CLBlasKargs {
     reductionType redctnType;   // To store kind of reduction for reduction-framewrok to handle -- enum
 } CLBlasKargs;
 
+
+/**
+ * @internal
+ * @brief Initialize the binary cache (on disk) for OpenCL programs 
+ */
+void clblasInitBinaryCache(void);
+
+/* 
+ * Clear all registered functor caches 
+ */
+void cleanFunctorCaches(void);
+
+
 static __inline bool
 areKernelsCacheable(void)
 {
@@ -213,11 +226,26 @@ getQueueProperties(
     cl_command_queue queue,
     cl_command_queue_properties *props);
 
+
+Kernel
+*makeKernelCached(
+    cl_device_id device,
+    cl_context context,
+    solver_id_t sid,
+    KernelKey * key,
+    SolverKgen kernelGenerator,
+    const SubproblemDim *dims,
+    const PGranularity *pgran,
+    const CLBLASKernExtra *extra,
+    const char *buildOpts,
+    cl_int *error);
+
 Kernel
 *makeKernel(
     cl_device_id device,
     cl_context context,
     SolverKgen kernelGenerator,
+    cl_program program,
     const SubproblemDim *dims,
     const PGranularity *pgran,
     const CLBLASKernExtra *extra,
diff --git a/src/library/blas/init.c b/src/library/blas/init.c
index 2b257a8..31cb3b0 100644
--- a/src/library/blas/init.c
+++ b/src/library/blas/init.c
@@ -52,6 +52,9 @@ clblasSetup(void)
     // printf("\n%s, line %d\n", __func__, __LINE__);
     initMallocTrace();
 
+
+    clblasInitBinaryCache();
+
     clblasSolvers[CLBLAS_GEMM].nrPatterns =
         initGemmMemPatterns(clblasSolvers[CLBLAS_GEMM].memPatterns);
     clblasSolvers[CLBLAS_GEMM].defaultPattern = -1;
@@ -215,6 +218,13 @@ clblasSetup(void)
     return clblasSuccess;
 }
 
+// TO BE FIXED: this is really an ugly hack.
+// The tune tool and some tests are linked with
+// only a subset of clBLAS that does not contain
+// the functor-related code.
+//
+//void (* _cleanFunctorCachesHook)(void) = 0 ;
+
 void
 clblasTeardown(void)
 {
@@ -235,6 +245,8 @@ clblasTeardown(void)
     // win32 - crashes
     destroyStorageCache();
 
+    cleanFunctorCaches() ;
+
     printMemLeaksInfo();
     releaseMallocTrace();
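
With this change, clblasSetup() also primes the on-disk binary cache and clblasTeardown() releases the functor caches. Below is a minimal sketch of the host lifecycle those hooks run inside, using only the public clblasSetup/clblasTeardown entry points; error handling is trimmed and the function name is illustrative.

    /* Minimal sketch of the lifecycle around the new cache hooks:
     * clblasSetup() now also calls clblasInitBinaryCache(), and
     * clblasTeardown() now also calls cleanFunctorCaches(). */
    #include <clBLAS.h>

    int run_blas_work(void)
    {
        clblasStatus st = clblasSetup();   /* caches become available here */
        if (st != clblasSuccess)
            return (int)st;

        /* ... enqueue clblasSgemm / clblasDtrsm work here ... */

        clblasTeardown();                  /* functor caches cleaned here */
        return 0;
    }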
 
diff --git a/src/library/blas/matrix.c b/src/library/blas/matrix.c
new file mode 100644
index 0000000..bb65bc9
--- /dev/null
+++ b/src/library/blas/matrix.c
@@ -0,0 +1,979 @@
+/* ************************************************************************
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+#include<stdio.h>
+#include<stdlib.h>
+#include<limits.h>
+#include<clBLAS.h>
+
+#define SWAP(TYPE,a,b)  do { TYPE swap_tmp_ = a ; a = b ; b = swap_tmp_ ; } while(0) 
+
+// Return true if the area starting from point (x,y) and of size (w,h) is
+// within the array of size d1 x d2
+static int inside2d( size_t d1, size_t d2, int x, int y, size_t w, size_t h ) 
+{
+  // Very very large dimensions are likely a bug
+  size_t MAXDIM = ((size_t)INT_MAX)  ;
+  size_t max_w = (size_t)(d1-x) ;
+  size_t max_h = (size_t)(d2-y) ;
+
+  if ( d1 >= MAXDIM ) return 0 ; 
+  if ( d2 >= MAXDIM ) return 0 ;
+  if ( w  >= MAXDIM ) return 0 ; 
+  if ( h  >= MAXDIM ) return 0 ;
+
+  if ( x < 0 || x >= (int)d1 ) return 0 ;
+  if ( w > max_w ) return 0 ;
+
+  if ( y < 0 || y >= (int)d2 ) return 0 ;
+  if ( h > max_h ) return 0 ;
+       
+  return 1 ;
+}
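
A quick worked illustration of the check above (values chosen for the example only):

    /* inside2d(d1=10, d2=10, x=2, y=3, w=8, h=7) -> 1   (2+8 <= 10 and 3+7 <= 10) */
    /* inside2d(d1=10, d2=10, x=2, y=3, w=9, h=7) -> 0   (2+9 exceeds d1)          */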
+
+clblasStatus clblasMatrixSizeInfo(clblasOrder order,
+                                  size_t rows,
+                                  size_t columns,
+                                  size_t elemsize,
+                                  size_t padding,
+                                  size_t * ld,
+                                  size_t * fullsize)
+{
+  size_t x;
+  size_t y;
+
+  if( order == clblasRowMajor )
+  {
+    x = columns;
+    y = rows;
+  }
+  else 
+  {
+    x = rows;
+    y = columns;
+  }
+
+  // set if not NULL
+  if( ld ) *ld = x + padding;
+  if( fullsize ) *fullsize = (size_t) ( (x + padding) * y * elemsize );
+
+  return clblasSuccess; 
+}
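
For example (illustrative numbers only), a column-major 1000 x 500 matrix of doubles with a padding of 24 elements yields ld = 1000 + 24 = 1024 and fullsize = 1024 * 500 * 8 = 4096000 bytes:

    /* Sketch of a call to clblasMatrixSizeInfo(); the figures are an example only. */
    size_t ld, fullsize;
    clblasMatrixSizeInfo(clblasColumnMajor,
                         1000,               /* rows                */
                         500,                /* columns             */
                         sizeof(cl_double),  /* elemsize            */
                         24,                 /* padding in elements */
                         &ld,                /* -> 1024             */
                         &fullsize);         /* -> 4096000          */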
+
+
+cl_mem clblasCreateMatrix(
+    cl_context context,
+    clblasOrder order,
+    size_t rows,
+    size_t columns,
+    size_t elemsize,
+    size_t padding,
+    size_t * ld,
+    size_t * fullsize,
+    cl_int * err)
+{
+  size_t tmp_fullsize;
+  cl_mem_flags flags = CL_MEM_READ_WRITE;
+
+  clblasMatrixSizeInfo(
+      order,
+      rows,
+      columns,
+      elemsize,
+      padding,
+      ld,
+      &tmp_fullsize);
+
+  // set if not NULL
+  if(fullsize != NULL) *fullsize = tmp_fullsize;
+
+  return clCreateBuffer(
+      context,
+      flags,
+      tmp_fullsize,
+      NULL,
+      err);
+}
+
+/*
+ * Internal function: 
+ *  see clblasCreateMatrix()
+ */
+cl_mem clblasCreateMatrixWithLd(
+    cl_context context,
+    clblasOrder order,
+    size_t rows,
+    size_t columns,
+    size_t elemsize,
+    size_t ld,
+    size_t * fullsize,
+    cl_int * err)
+{
+  size_t nbelem = 0;
+  size_t size;
+  cl_mem_flags flags = CL_MEM_READ_WRITE;
+
+  // compute number of elements
+  if( order == clblasRowMajor  )
+  {
+    // check ld
+    if( ld < columns )
+    {
+      *err = clblasInvalidValue;
+      return 0;
+    }
+
+    nbelem = rows * ld; 
+  }
+  else if( order == clblasColumnMajor )
+  {
+    // check ld
+    if( ld < rows )
+    {
+      *err = clblasInvalidValue;
+      return 0;
+    }
+
+    nbelem = ld * columns; 
+  }
+
+  // compute the size locally so a NULL fullsize is never dereferenced
+  size = nbelem * elemsize;
+
+  // set if not NULL
+  if( fullsize ) *fullsize = size;
+
+  // allocate
+  return clCreateBuffer(
+      context,
+      flags,
+      size,
+      NULL,
+      err);
+}
+
+
+cl_mem clblasCreateMatrixFromHost(
+    cl_context context, 
+    clblasOrder order,
+    size_t rows,
+    size_t columns, 
+    size_t elemsize,
+    size_t ld,
+    void * host,
+    size_t off_host, 
+    size_t ld_host,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,  
+    cl_int * err)
+{
+  size_t fullsize;
+  cl_mem out;
+
+  out = clblasCreateMatrixWithLd(
+      context,
+      order,
+      rows,
+      columns,
+      elemsize,
+      ld,
+      &fullsize,
+      err);
+
+  if( ! *err )
+  {
+    // TODO use WriteMatrix instead ?
+    // note: only the clblasRowMajor layout is handled here
+    if( order == clblasRowMajor )
+    {
+      // a single rect write covers the whole matrix, so no per-row loop is needed
+      const size_t host_orig[3] = {off_host, off_host, 0};
+      const size_t buff_orig[3] = {0, 0, 0};
+      const size_t region[3] = {columns*elemsize, rows, 1};
+
+      *err = clEnqueueWriteBufferRect(
+          command_queue,
+          out,
+          CL_TRUE,
+          buff_orig,
+          host_orig,
+          region,
+          columns * elemsize,
+          0,
+          ld_host * elemsize,
+          0,
+          host,
+          numEventsInWaitList,
+          eventWaitList,
+          NULL);
+    }
+  }
+
+  return out;
+}
+
+/*
+ * Internal function: 
+ *  enqueue event in list and wait for it if blocking
+ */
+static clblasStatus emptyAction(
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event,
+    cl_bool blocking)
+{
+  cl_int err ; 
+
+  err = clEnqueueBarrierWithWaitList(
+      command_queue, 
+      numEventsInWaitList,
+      eventWaitList, 
+      event);
+
+  if (err != clblasSuccess) 
+    return  (clblasStatus)err;
+
+  if(blocking) 
+    return  (clblasStatus)clWaitForEvents(1, event);
+  else 
+    return (clblasStatus)err;
+}
+
+/*
+ * Internal function:
+ *  Generic version of clblasWriteSubMatrix with blocking arg
+ *  event must be non-NULL if blocking is set to CL_TRUE
+ */
+static clblasStatus _clblasWriteSubMatrix(
+    clblasOrder order,
+    size_t element_size,
+    const void *A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    cl_mem B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event,
+    cl_bool blocking)
+{
+  
+  if( order == clblasRowMajor )
+  {
+    SWAP(size_t, xA, yA);
+    SWAP(size_t, nrA, ncA);
+    SWAP(size_t, xB, yB);
+    SWAP(size_t, nrB, ncB);
+    SWAP(size_t, nx, ny);
+  }
+
+  // Check that the specified area is within the array A
+  if ( !inside2d( nrA,ncA, xA,yA , nx,ny ) ) {
+    return clblasInvalidValue ;
+  }
+
+  // Check that the specified area is within the array B 
+  if ( !inside2d( nrB,ncB, xB,yB , nx,ny ) ) {
+    return clblasInvalidValue ;
+  }
+
+
+  if( nx == 0 || ny == 0 )
+  { 
+    return emptyAction(
+        command_queue,
+        numEventsInWaitList, 
+        eventWaitList, 
+        event,
+        blocking);  
+  } 
+
+  {
+    const size_t origA[3] = { (xA+offA)*element_size, yA, 0 };
+    const size_t origB[3] = { (xB+offB)*element_size, yB, 0 };
+    const size_t region[3] = { nx * element_size, ny, 1 };
+
+    return (clblasStatus) clEnqueueWriteBufferRect(
+        command_queue,
+        B,
+        blocking,
+        origB,
+        origA,
+        region,
+        ldB * element_size,
+        0,
+        ldA * element_size,
+        0,
+        A,
+        numEventsInWaitList,
+        eventWaitList,
+        event);
+  }
+}
+
+clblasStatus clblasWriteSubMatrix(
+    clblasOrder order,
+    size_t element_size,
+    const void *A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    cl_mem B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  cl_event evt;
+
+  return _clblasWriteSubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      nrA, ncA,
+      xA, yA,
+      B, offB, ldB,
+      nrB, ncB,
+      xB, yB,
+      nx, ny,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      &evt,
+      CL_TRUE);
+}
+
+clblasStatus clblasWriteSubMatrixAsync(
+    clblasOrder order,
+    size_t element_size,
+    const void *A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    cl_mem B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event)
+{
+  return _clblasWriteSubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      nrA, ncA,
+      xA, yA,
+      B, offB, ldB,
+      nrB, ncB,
+      xB, yB,
+      nx, ny,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      event,
+      CL_FALSE);
+}
+
+
+/*
+ * Internal function:
+ *  Generic version of clblasReadSubMatrix with blocking arg
+ *  event must be non-NULL if blocking is set to CL_TRUE
+ */
+static clblasStatus _clblasReadSubMatrix(
+    clblasOrder order,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    void *B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event,
+    cl_bool blocking)
+{
+
+  if( order == clblasRowMajor )
+  {
+    SWAP(size_t, xA, yA);
+    SWAP(size_t, nrA, ncA);
+    SWAP(size_t, xB, yB);
+    SWAP(size_t, nrB, ncB);
+    SWAP(size_t, nx, ny);
+  }
+
+  if( nx == 0 || ny == 0 )
+  { 
+    return emptyAction(
+        command_queue,
+        numEventsInWaitList, 
+        eventWaitList, 
+        event,
+        blocking);  
+  } 
+
+  // Check that the specified area is within the array A
+  if ( !inside2d( nrA,ncA, xA,yA , nx,ny ) ) {
+    return clblasInvalidValue ;
+  }
+
+  // Check that the specified area is within the array B 
+  if ( !inside2d( nrB,ncB, xB,yB , nx,ny ) ) {
+    return clblasInvalidValue ;
+  }
+
+  {
+    const size_t origA[3] = { (xA+offA)*element_size, yA, 0 };
+    const size_t origB[3] = { (xB+offB)*element_size, yB, 0 };
+    const size_t region[3] = { nx * element_size, ny, 1 };
+
+    return (clblasStatus) clEnqueueReadBufferRect(
+        command_queue,
+        A,
+        blocking,
+        origA,
+        origB,
+        region,
+        ldA * element_size,
+        0,
+        ldB * element_size,
+        0,
+        B,
+        numEventsInWaitList,
+        eventWaitList,
+        event);
+  }
+}
+
+
+clblasStatus clblasReadSubMatrix(
+    clblasOrder order,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    void *B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  cl_event evt;
+
+  return _clblasReadSubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      nrA, ncA,
+      xA, yA,
+      B, offB, ldB,
+      nrB, ncB,
+      xB, yB,
+      nx, ny,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      &evt,
+      CL_TRUE);
+}
+
+
+clblasStatus clblasReadSubMatrixAsync(
+    clblasOrder order,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    void *B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event)
+{
+  return _clblasReadSubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      nrA, ncA,
+      xA, yA,
+      B, offB, ldB,
+      nrB, ncB,
+      xB, yB,
+      nx, ny,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      event,
+      CL_FALSE);
+}
+
+
+/*
+ * Internal function:
+ *  Generic version of clblasCopySubMatrix with blocking arg
+ *  event must be non-NULL if blocking is set to CL_TRUE
+ */
+static clblasStatus _clblasCopySubMatrix(
+    clblasOrder order,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    cl_mem B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event,
+    cl_bool blocking)
+{
+  cl_int err;
+  if( order == clblasRowMajor )
+  {
+    SWAP(size_t, xA, yA);
+    SWAP(size_t, nrA, ncA);
+    SWAP(size_t, xB, yB);
+    SWAP(size_t, nrB, ncB);
+    SWAP(size_t, nx, ny);
+  }
+
+  if( nx == 0 || ny == 0 )
+  { 
+    return emptyAction(
+        command_queue,
+        numEventsInWaitList, 
+        eventWaitList, 
+        event,
+        blocking);  
+  } 
+
+  // Check that the specified area is within the array A
+  if ( !inside2d( nrA,ncA, xA,yA , nx,ny ) ) {
+    return clblasInvalidValue ;
+  }
+
+  // Check that the specified area is within the array B 
+  if ( !inside2d( nrB,ncB, xB,yB , nx,ny ) ) {
+    return clblasInvalidValue ;
+  }
+
+  {
+    const size_t origA[3] = { (xA+offA)*element_size, yA, 0 };
+    const size_t origB[3] = { (xB+offB)*element_size, yB, 0 };
+    const size_t region[3] = { nx * element_size, ny, 1 };
+
+    err = clEnqueueCopyBufferRect(
+                                  command_queue,
+                                  A,
+                                  B,
+                                  origA,
+                                  origB,
+                                  region,
+                                  ldA * element_size,
+                                  0,
+                                  ldB * element_size,
+                                  0,
+                                  numEventsInWaitList,
+                                  eventWaitList,
+                                  event);
+  }
+
+  if (err != clblasSuccess) 
+    return  (clblasStatus)err;
+
+  if(blocking) 
+    return  (clblasStatus)clWaitForEvents(1, event);
+  else 
+    return (clblasStatus)err;
+}
+
+
+clblasStatus clblasCopySubMatrix(
+    clblasOrder order,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    cl_mem B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  cl_event evt;
+
+  return (clblasStatus) _clblasCopySubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      nrA, ncA,
+      xA, yA,
+      B, offB, ldB,
+      nrB, ncB,
+      xB, yB,
+      nx, ny,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      &evt,      
+      CL_TRUE);
+}
+
+
+clblasStatus clblasCopySubMatrixAsync(
+    clblasOrder order,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    size_t nrA, size_t ncA,
+    size_t xA, size_t yA,
+    cl_mem B, size_t offB, size_t ldB,
+    size_t nrB, size_t ncB,
+    size_t xB, size_t yB,
+    size_t nx, size_t ny,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *event)
+{
+  return (clblasStatus) _clblasCopySubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      nrA, ncA,
+      xA, yA,
+      B, offB, ldB,
+      nrB, ncB,
+      xB, yB,
+      nx, ny,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      event,
+      CL_FALSE);
+}
+
+
+clblasStatus clblasWriteVector(
+    size_t nb_elem,
+    size_t element_size,
+    const void *A, size_t offA,
+    cl_mem B, size_t offB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  return clblasWriteMatrix(
+      clblasColumnMajor,
+      nb_elem, 1,
+      element_size,
+      A, offA, nb_elem,
+      B, offB, nb_elem,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList);
+}
+
+
+clblasStatus clblasWriteVectorAsync(
+    size_t nb_elem,
+    size_t element_size,
+    const void *A, size_t offA,
+    cl_mem B, size_t offB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  return clblasWriteMatrixAsync(
+      clblasColumnMajor,
+      nb_elem, 1,
+      element_size,
+      A, offA, nb_elem,
+      B, offB, nb_elem,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      events);
+}
+
+
+clblasStatus clblasReadVector(
+    size_t nb_elem,
+    size_t element_size,
+    const cl_mem A, size_t offA,
+    void * B, size_t offB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  return clblasReadMatrix(
+      clblasColumnMajor,
+      nb_elem, 1,
+      element_size,
+      A, offA, nb_elem,
+      B, offB, nb_elem,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList);
+}
+
+
+clblasStatus clblasReadVectorAsync(
+    size_t nb_elem,
+    size_t element_size,
+    const cl_mem A, size_t offA,
+    void * B, size_t offB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  return clblasReadMatrixAsync(
+      clblasColumnMajor,
+      nb_elem, 1,
+      element_size,
+      A, offA, nb_elem,
+      B, offB, nb_elem,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      events);
+}
+
+
+clblasStatus clblasCopyVector(
+    size_t nb_elem,
+    size_t element_size,
+    const cl_mem A, size_t offA,
+    cl_mem B, size_t offB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  return clblasCopyMatrix(
+      clblasColumnMajor,
+      nb_elem, 1,
+      element_size,
+      A, offA, nb_elem,
+      B, offB, nb_elem,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList);
+}
+
+
+clblasStatus clblasCopyVectorAsync(
+    size_t nb_elem,
+    size_t element_size,
+    const cl_mem A, size_t offA,
+    cl_mem B, size_t offB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  return clblasCopyMatrixAsync(
+      clblasColumnMajor,
+      nb_elem, 1,
+      element_size,
+      A, offA, nb_elem,
+      B, offB, nb_elem,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      events);
+}
+
+
+clblasStatus clblasWriteMatrix(
+    clblasOrder order,
+    size_t sx, size_t sy,
+    size_t element_size,
+    const void *A, size_t offA, size_t ldA,
+    cl_mem B, size_t offB, size_t ldB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  return clblasWriteSubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      sx, sy,
+      0, 0,
+      B, offB, ldB,
+      sx, sy,
+      0, 0,
+      sx, sy,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList);		
+}
+
+
+clblasStatus clblasWriteMatrixAsync(
+    clblasOrder order,
+    size_t sx, size_t sy,
+    size_t element_size,
+    const void *A, size_t offA, size_t ldA,
+    cl_mem B, size_t offB, size_t ldB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  return clblasWriteSubMatrixAsync(
+      order,
+      element_size,
+      A, offA, ldA,
+      sx, sy,
+      0, 0,
+      B, offB, ldB,
+      sx, sy,
+      0, 0,
+      sx, sy,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      events);	
+}
+
+
+clblasStatus clblasReadMatrix(
+    clblasOrder order,
+    size_t sx, size_t sy,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    void * B, size_t offB, size_t ldB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  return clblasReadSubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      sx, sy,
+      0, 0,
+      B, offB, ldB,
+      sx, sy,
+      0, 0,
+      sx, sy,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList);
+}
+
+
+clblasStatus clblasReadMatrixAsync(
+    clblasOrder order,
+    size_t sx, size_t sy,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    void * B, size_t offB, size_t ldB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  return clblasReadSubMatrixAsync(
+      order,
+      element_size,
+      A, offA, ldA,
+      sx, sy,
+      0, 0,
+      B, offB, ldB,
+      sx, sy,
+      0, 0,
+      sx, sy,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      events);
+}
+
+
+clblasStatus clblasCopyMatrix(
+    clblasOrder order,
+    size_t sx, size_t sy,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    cl_mem B, size_t offB, size_t ldB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList)
+{
+  return clblasCopySubMatrix(
+      order,
+      element_size,
+      A, offA, ldA,
+      sx, sy,
+      0, 0,
+      B, offB, ldB,
+      sx, sy,
+      0, 0,
+      sx, sy,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList);
+}
+
+
+clblasStatus clblasCopyMatrixAsync(
+    clblasOrder order,
+    size_t sx, size_t sy,
+    size_t element_size,
+    const cl_mem A, size_t offA, size_t ldA,
+    cl_mem B, size_t offB, size_t ldB,
+    cl_command_queue command_queue,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  return clblasCopySubMatrixAsync(
+      order,
+      element_size,
+      A, offA, ldA,
+      sx, sy,
+      0, 0,
+      B, offB, ldB,
+      sx, sy,
+      0, 0,
+      sx, sy,
+      command_queue,
+      numEventsInWaitList,
+      eventWaitList,
+      events);
+}
+
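Taken together, the new helpers in matrix.c form a small host-side convenience layer over clEnqueue{Write,Read,Copy}BufferRect. The sketch below is a hedged end-to-end example of the create/write/read wrappers; context and queue creation are omitted, the variable names are illustrative, padding is set to zero so the device leading dimension equals the row count, and return codes of the write/read calls are not checked for brevity.

    /* Sketch: allocate a device matrix, upload a host matrix, read it back.
     * ctx and queue are assumed to be a valid cl_context / cl_command_queue. */
    #include <clBLAS.h>

    static int roundtrip(cl_context ctx, cl_command_queue queue,
                         const double *host, double *back,
                         size_t rows, size_t cols)
    {
        cl_int err;
        size_t ld;
        cl_mem devA = clblasCreateMatrix(ctx, clblasColumnMajor,
                                         rows, cols, sizeof(double),
                                         0 /* padding */, &ld, NULL, &err);
        if (err != CL_SUCCESS)
            return (int)err;

        /* blocking upload and download of the full matrix */
        clblasWriteMatrix(clblasColumnMajor, rows, cols, sizeof(double),
                          host, 0, rows, devA, 0, ld,
                          queue, 0, NULL);
        clblasReadMatrix(clblasColumnMajor, rows, cols, sizeof(double),
                         devA, 0, ld, back, 0, rows,
                         queue, 0, NULL);

        clReleaseMemObject(devA);
        return CL_SUCCESS;
    }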
diff --git a/src/library/blas/xgemm.c b/src/library/blas/xgemm.c
deleted file mode 100644
index c5d7209..0000000
--- a/src/library/blas/xgemm.c
+++ /dev/null
@@ -1,783 +0,0 @@
-/* ************************************************************************
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ************************************************************************/
-
-
-#include <string.h>
-#include <clBLAS.h>
-#include <stdlib.h>
-
-#include <devinfo.h>
-#include "clblas-internal.h"
-#include "solution_seq.h"
-
-static clblasStatus
-doGemm(
-    CLBlasKargs *kargs,
-    clblasOrder order,
-    clblasTranspose transA,
-    clblasTranspose transB,
-    size_t M,
-    size_t N,
-    size_t K,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    const cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_mem C,
-    size_t offC,
-    size_t ldc,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    cl_int err;
-    ListHead seq;
-    clblasStatus retCode = clblasSuccess;
-
-    if (!clblasInitialized) {
-        return clblasNotInitialized;
-    }
-
-    /* Validate arguments */
-
-    if ((retCode = checkMemObjects(A, B, C, true, A_MAT_ERRSET, B_MAT_ERRSET, C_MAT_ERRSET))) {
-        return retCode;
-    }
-    if (K != 0) {
-        if ((retCode = checkMatrixSizes(kargs->dtype, order, transA, M,
-                                        K, A, offA, lda, A_MAT_ERRSET ))) {
-            return retCode;
-        }
-        if ((retCode = checkMatrixSizes(kargs->dtype, order, transB,
-                                        K, N, B, offB, ldb, B_MAT_ERRSET ))) {
-            return retCode;
-        }
-    }
-    if ((retCode = checkMatrixSizes(kargs->dtype, order, clblasNoTrans,
-                                    M, N, C, offC, ldc, C_MAT_ERRSET ))) {
-            return retCode;
-    }
-
-	#ifdef DEBUG_2
-	printf("DoGemm being called...\n");
-	#endif
-    kargs->order = order;
-    kargs->transA = transA;
-    kargs->transB = transB;
-    kargs->M = M;
-    kargs->N = N;
-    kargs->K = K;
-    kargs->A = A;
-    kargs->offA = offA;
-    kargs->lda.matrix = lda;
-    kargs->B = B;
-    kargs->offBX = offB;
-    kargs->ldb.matrix = ldb;
-    kargs->C = C;
-    kargs->offCY = offC;
-    kargs->ldc.matrix = ldc;
-
-    kargs->offsetM = 0;
-    kargs->offsetN = 0;
-    kargs->scimage[0] = 0;
-    kargs->scimage[1] = 0;
-
-    listInitHead(&seq);
-    err = makeSolutionSeq(CLBLAS_GEMM, kargs, numCommandQueues, commandQueues,
-        numEventsInWaitList, eventWaitList, events, &seq);
-    if (err == CL_SUCCESS) {
-        err = executeSolutionSeq(&seq);
-    }
-
-    freeSolutionSeq(&seq);
-
-    return (clblasStatus)err;
-}
-
-
-static ssize_t
-TransposeKernel(
-   char *buf,
-   size_t buflen,
-   const struct SubproblemDim *subdims,
-   void *extra)
-{
-/*
- *Transpose kernel generator
- *a typical kernel for mod4 sizes in both direction looks like below
-
-                   "// micro tile size is 4 x 4 \n"
-				   "// matrix are of column major \n"
-                   "#pragma OPENCL EXTENSION cl_amd_printf : enable \n"
-                   "void __kernel \n"
-                   "transpose(  uint X, \n"
-			       "uint Y, \n"
-			       "uint ld, \n"
-			       "uint offset, \n"
-			       "const __global float *restrict mat, \n"
-			       "__global float *transposed_mat) \n"
-                   "{ \n"
-				   "transposed_mat += offset; \n"
-				   "mat += offset; \n"
-		           "transposed_mat += ( (uint)get_global_id(1) * Y + (uint)get_global_id(0) ) << 2; \n"
-		           "mat += ( (uint)get_global_id(0) * ld + (uint)get_global_id(1) ) << 2; \n"
-		           "//transpose inside the block \n"
-		           "transposed_mat[0] = mat[0]; \n"
-		           "transposed_mat[1] = mat[ld]; \n"
-		           "transposed_mat[2] = mat[ld*2]; \n"
-		           "transposed_mat[3] = mat[ld*3]; \n"
-				   "\n"
-		           "transposed_mat[Y] = mat[1]; \n"
-		           "transposed_mat[Y+1] = mat[1+ld]; \n"
-		           "transposed_mat[Y+2] = mat[1+ld*2]; \n"
-		           "transposed_mat[Y+3] = mat[1+ld*3]; \n"
-				   "\n"
-		           "transposed_mat[2*Y] = mat[2]; \n"
-		           "transposed_mat[2*Y+1] = mat[2+ld]; \n"
-		           "transposed_mat[2*Y+2] = mat[2+ld*2]; \n"
-		           "transposed_mat[2*Y+3] = mat[2+ld*3]; \n"
-				   "\n"
-		           "transposed_mat[3*Y] = mat[3]; \n"
-		           "transposed_mat[3*Y+1] = mat[3+ld]; \n"
-		           "transposed_mat[3*Y+2] = mat[3+ld*2]; \n"
-		           "transposed_mat[3*Y+3] = mat[3+ld*3]; \n"
-                   "}";
-*/
-    struct KgenContext *ctx;
-    ssize_t ret = 0;
-    char tmp[2048];
-    int modX = subdims->x;
-    int modY = subdims->y;
-
-    ctx = createKgenContext(buf, buflen, true);
-
-    sprintf(tmp, "// micro tile size is 4 x 4 \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "// matrix are of column major \n");
-    kgenAddStmt(ctx, tmp);
-
-    //kernel declartion
-    sprintf(tmp, "void __kernel \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "transpose(  uint X, \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "uint Y, \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "uint ld, \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "uint offset, \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "const __global float *restrict mat, \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "__global float *transposed_mat) \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "{ \n");
-    kgenAddStmt(ctx, tmp);
-
-    //kernel body
-    sprintf(tmp, "uint global_id_0 = (uint)get_global_id(0); \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "uint global_id_1 = (uint)get_global_id(1); \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "uint global_size_0 = (uint)get_global_size(0); \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "uint global_size_1 = (uint)get_global_size(1); \n");
-    kgenAddStmt(ctx, tmp);
-
-    sprintf(tmp, "transposed_mat += offset; \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "mat += offset; \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "transposed_mat += ( global_id_1 * Y + global_id_0 ) << 2; \n");
-    kgenAddStmt(ctx, tmp);
-    sprintf(tmp, "mat += ( global_id_0 * ld + global_id_1 ) << 2; \n");
-    kgenAddStmt(ctx, tmp);
-
-    sprintf(tmp, "//transpose inside the block \n");
-    kgenAddStmt(ctx, tmp);
-    //first block
-    sprintf(tmp, "transposed_mat[0] = mat[0]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[1] = mat[ld]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2 )
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[2] = mat[ld*2]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[3] = mat[ld*3]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "\n");
-    kgenAddStmt(ctx, tmp);
-
-    //second block
-    if(modX == 1)
-    {
-        sprintf(tmp, "if( global_id_1 < global_size_1 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[Y] = mat[1]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[Y+1] = mat[1+ld]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[Y+2] = mat[1+ld*2]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[Y+3] = mat[1+ld*3]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "\n");
-    kgenAddStmt(ctx, tmp);
-    if(modX == 1)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-
-	//third block
-    if(modX == 1 || modX == 2)
-    {
-        sprintf(tmp, "if( global_id_1 < global_size_1 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[2*Y] = mat[2]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[2*Y+1] = mat[2+ld]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[2*Y+2] = mat[2+ld*2]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[2*Y+3] = mat[2+ld*3]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "\n");
-    kgenAddStmt(ctx, tmp);
-    if(modX == 1 || modX == 2)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-	}
-
-	//fourth block
-    if(modX == 1 || modX == 2 || modX == 3)
-    {
-        sprintf(tmp, "if( global_id_1 < global_size_1 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[3*Y] = mat[3]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[3*Y+1] = mat[3+ld]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY == 1)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[3*Y+2] = mat[3+ld*2]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "if( global_id_0 < global_size_0 - 1 ) \n");
-        kgenAddStmt(ctx, tmp);
-        sprintf(tmp, "{ \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    sprintf(tmp, "transposed_mat[3*Y+3] = mat[3+ld*3]; \n");
-    kgenAddStmt(ctx, tmp);
-    if(modY ==1 || modY == 2 || modY == 3)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-    if(modX == 1 || modX == 2 || modX == 3)
-    {
-        sprintf(tmp, "} \n");
-        kgenAddStmt(ctx, tmp);
-    }
-
-
-	sprintf(tmp, "} \n");
-    kgenAddStmt(ctx, tmp);
-
-    if (!ret) {
-        ret = (ssize_t)kgenSourceSize(ctx) + 1;
-    }
-    destroyKgenContext(ctx);
-	return ret;
-}
-void 
-transposeMemObject(
-    clblasOrder order,
-    size_t X,
-    size_t Y,
-    size_t ld,
-    size_t offset,
-    const cl_mem src,
-    cl_mem dst,
-    cl_context context,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-/**
- *transposition of a 2D MemObject
- * @param[in] order     Row/column order.
- * @param[in] X         Number of columns in transposed matrix / rows in input matrix.
- * @param[in] Y         Number of rows in transposed matrix / columns in input matrix.
- * @param[in] ld        Leading dimension of input matrix.
- * @param[in] offset    offset size.
- * @param[in] src       Input matrix of the transposition.
- * @param[in] dst       Output matrix of the transposition.
- * @param[in] numCommandQueues    Number of OpenCL command queues in which the
- *                                task is to be performed.
- * @param[in] commandQueues       OpenCL command queues.
- * @param[in] numEventsInWaitList Number of events in the event wait list.
- * @param[in] eventWaitList       Event wait list.
- * @param[in] events     Event objects per each command queue that identify
- *                       a particular kernel execution instance.
- */
-
-
-    char *source;
-    cl_int err;
-    cl_kernel kernel;
-    Kernel *transpose_kernel;
-    solver_id_t sid;
-    KernelKey key;
-    CLBLASKernExtra extra;
-    BlasFunctionID funcID = CLBLAS_TRANSPOSE;
-    char *log;
-    const cl_uint workDim = 2;
-    const size_t localWorkSize[2] = { 8, 8 };
-    size_t globalWorkSize[2];
-    ssize_t size;
-
-    sid = makeSolverID(funcID, 1);
-    memset(key.subdims, 0, sizeof(key.subdims));
-    key.nrDims = 2;
-    key.subdims[0].x = X%4;
-    key.subdims[0].y = Y%4;
-    key.subdims[0].bwidth = 1; 
-    key.subdims[0].itemX = 4;
-    key.subdims[0].itemY = 4;
-    memset(&extra, 0, sizeof(extra));
-
-    err = getQueueDevice(*commandQueues, &key.device);
-    err = getQueueContext(*commandQueues, &key.context);
-
-    //look for the kernel from cache first
-    if (areKernelsCacheable()) 
-    {
-        transpose_kernel = findKernel(clblasKernelCache, sid, &key, &extra);
-    }
-
-    // if transpose_kernel was not found from cache, create the kernel
-    if (transpose_kernel == NULL)
-    {
-        transpose_kernel = allocKernel();
-        log = malloc(65536);
-        if (log) {
-            log[0] = '\0';
-        }
-        //kernel source auto generation
-        //call size = TransposeKernel(NULL, 0, ...)
-        //then allocate buffer and call TransposeKernel again
-        size = TransposeKernel(NULL, 0, &key.subdims[0], &extra);
-        source = calloc(1, size);
-        TransposeKernel(source, size, &key.subdims[0], &extra);
-        //printf("transpose source: %s\n", source);
-        transpose_kernel->program = buildClProgram(source, NULL, key.context, key.device,
-                                     log, 65536, &err);
-        transpose_kernel->extraSize = sizeof(CLBLASKernExtra);
-        transpose_kernel->extra = calloc(1, transpose_kernel->extraSize);// memory freed by clblasTeardown
-        *(CLBLASKernExtra*)(transpose_kernel->extra) = extra;
-
-        //save the kernel in cache 
-        getKernel(transpose_kernel);
-        if (addKernelToCache(clblasKernelCache, sid, transpose_kernel, &key,
-                             clblasKernelExtraCmp)) 
-        {
-            putKernel(clblasKernelCache, transpose_kernel);
-        }
-        free(log);
-        free(source);
-    }
-
-    //launch the kernel
-    err = clCreateKernelsInProgram(transpose_kernel->program, 1, &kernel, NULL);
-
-    err = clSetKernelArg(kernel, 0, sizeof(cl_uint), &X);
-    err = clSetKernelArg(kernel, 1, sizeof(cl_uint), &Y);
-    err = clSetKernelArg(kernel, 2, sizeof(cl_uint), &ld);
-    err = clSetKernelArg(kernel, 3, sizeof(cl_uint), &offset);
-    err = clSetKernelArg(kernel, 4, sizeof(cl_mem), &src);
-    err = clSetKernelArg(kernel, 5, sizeof(cl_mem), &dst);
-
-    globalWorkSize[0] = (Y+4-1)/4;
-    globalWorkSize[1] = (X+4-1)/4;
-
-    err = clEnqueueNDRangeKernel(*commandQueues, kernel, workDim, NULL,
-        globalWorkSize, localWorkSize, 0, NULL, NULL);
-    clFinish(*commandQueues);
-	
-    clReleaseKernel(kernel);
-
-
-}
-
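The removed implementation above generates its transpose kernel source in two passes: a first call with a NULL buffer returns the required size, and a second call fills the allocated buffer. A minimal sketch of that size-then-fill idiom, with a hypothetical genKernelSource() standing in for TransposeKernel():

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Hypothetical generator standing in for TransposeKernel(): it writes the
    // kernel source into 'buf' when the buffer is large enough, and always
    // returns the number of bytes required (including the terminating '\0').
    static size_t genKernelSource(char *buf, size_t buflen)
    {
        const char *src = "__kernel void dummy(__global float *x) { x[0] = 0; }\n";
        size_t needed = std::strlen(src) + 1;
        if (buf != NULL && buflen >= needed)
            std::memcpy(buf, src, needed);
        return needed;
    }

    int main()
    {
        size_t size = genKernelSource(NULL, 0);                // pass 1: size query
        char *source = static_cast<char *>(std::calloc(1, size)); // as in the removed code
        genKernelSource(source, size);                         // pass 2: fill the buffer
        std::printf("%s", source);
        std::free(source);
        return 0;
    }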
-clblasStatus
-clblasSgemm(
-    clblasOrder order,
-    clblasTranspose transA,
-    clblasTranspose transB,
-    size_t M,
-    size_t N,
-    size_t K,
-    cl_float alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    const cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_float beta,
-    cl_mem C,
-    size_t offC,
-    size_t ldc,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-    cl_context context;
-    cl_device_id device;
-    cl_int err;
-    cl_mem transposed_A;
-    clblasStatus status;
-    float *transposed_A_host;
-    size_t device_size;
-    char* device_name;
-    int fast_sgemmtn = 0;
-
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_FLOAT;
-    kargs.alpha.argFloat = alpha;
-    kargs.beta.argFloat = beta;
-
-    err = clGetCommandQueueInfo( *commandQueues, CL_QUEUE_DEVICE, sizeof(cl_device_id), &device, NULL);
-    if( err < 0)
-        return clblasInvalidCommandQueue;
-    
-    err = clGetDeviceInfo(device, CL_DEVICE_NAME, 0, NULL, &device_size);
-    device_name = (char*)malloc(device_size * sizeof(char));
-    err = clGetDeviceInfo(device, CL_DEVICE_NAME, device_size, device_name, NULL);
-    if( err < 0)
-        return clblasInvalidDevice;
-
-    if(getenv("CLBLAS_FAST_SGEMM_TN") != NULL)
-        fast_sgemmtn = *getenv("CLBLAS_FAST_SGEMM_TN") - '0';
-
-	
-	//if the env variable CLBLAS_FAST_SGEMM_TN is set to 1,
-	//if transA = T, transB = N and order = clblasColumnMajor,
-	//and if the device is Spectre, Hawaii or Tahiti,
-	//then transposing A first and calling the NN sgemm is a faster approach.
-	//the cost of this approach is the use of an extra cl_mem object
-	if( ( fast_sgemmtn == 1 ) && ( strcmp(device_name, "Spectre") || strcmp(device_name, "Hawaii") || strcmp(device_name, "Tahiti") )  && (transA == clblasTrans && transB == clblasNoTrans && order == clblasColumnMajor) )
-	{
-        //do the transpose on A
-        //only transpose the leading part of the matrix
-        //updating lda and transA is then necessary
-        free(device_name);
-        err = clGetCommandQueueInfo( *commandQueues, CL_QUEUE_CONTEXT, sizeof(cl_context), &context, NULL);
-
-        transposed_A = clCreateBuffer(context, CL_MEM_READ_WRITE, (M * K + offA) * sizeof(float), NULL, &err);
-        if( err < 0 )
-            return clblasOutOfResources;
-
-        transposeMemObject(order, K, M, lda, offA, A, transposed_A, context, numCommandQueues, commandQueues,
-							numEventsInWaitList, eventWaitList, events);
-
-        //transA should be reset to clblasNoTrans
-        transA = clblasNoTrans;
-        //update lda to the minimal size 
-        lda = M;
-        //now call doGemm with transposed A, updated lda and updated transA
-        status = doGemm(&kargs, order, transA, transB, M, N, K, transposed_A, offA, lda,
-                  B, offB, ldb, C, offC, ldc, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-		
-        clReleaseMemObject( transposed_A );
-		
-        return status;
-    }
-    else
-    {
-        free(device_name);
-        return doGemm(&kargs, order, transA, transB, M, N, K, A, offA, lda,
-                  B, offB, ldb, C, offC, ldc, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-    }
-
-
-}
-
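The removed fast path is opt-in: it only triggers when CLBLAS_FAST_SGEMM_TN is set to 1, the call is a T/N column-major SGEMM, and the device check passes; an extra cl_mem holding the transposed A is then allocated per call. A minimal sketch of how a caller would have opted in, assuming a POSIX environment (_putenv on Windows):

    #include <cstdlib>

    // Opt in to the transpose-then-NN SGEMM path before any clblasSgemm call.
    // The library then allocates an extra buffer for the transposed A per call.
    static void enable_fast_sgemm_tn()
    {
    #if defined(_WIN32)
        _putenv("CLBLAS_FAST_SGEMM_TN=1");
    #else
        setenv("CLBLAS_FAST_SGEMM_TN", "1", /*overwrite=*/1);
    #endif
    }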
-clblasStatus
-clblasDgemm(
-    clblasOrder order,
-    clblasTranspose transA,
-    clblasTranspose transB,
-    size_t M,
-    size_t N,
-    size_t K,
-    cl_double alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    const cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_double beta,
-    cl_mem C,
-    size_t offC,
-    size_t ldc,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_DOUBLE;
-    kargs.alpha.argDouble = alpha;
-    kargs.beta.argDouble = beta;
-
-    return doGemm(&kargs, order, transA, transB, M, N, K, A, offA, lda,
-                  B, offB, ldb, C, offC, ldc, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-}
-
-clblasStatus
-clblasCgemm(
-    clblasOrder order,
-    clblasTranspose transA,
-    clblasTranspose transB,
-    size_t M,
-    size_t N,
-    size_t K,
-    FloatComplex alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    const cl_mem B,
-    size_t offB,
-    size_t ldb,
-    FloatComplex beta,
-    cl_mem C,
-    size_t offC,
-    size_t ldc,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_COMPLEX_FLOAT;
-    kargs.alpha.argFloatComplex = alpha;
-    kargs.beta.argFloatComplex = beta;
-
-    return doGemm(&kargs, order, transA, transB, M, N, K, A, offA, lda,
-                  B, offB, ldb, C, offC, ldc, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-}
-
-clblasStatus
-clblasZgemm(
-    clblasOrder order,
-    clblasTranspose transA,
-    clblasTranspose transB,
-    size_t M,
-    size_t N,
-    size_t K,
-    DoubleComplex alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    const cl_mem B,
-    size_t offB,
-    size_t ldb,
-    DoubleComplex beta,
-    cl_mem C,
-    size_t offC,
-    size_t ldc,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_COMPLEX_DOUBLE;
-    kargs.alpha.argDoubleComplex = alpha;
-    kargs.beta.argDoubleComplex = beta;
-
-    return doGemm(&kargs, order, transA, transB, M, N, K, A, offA, lda,
-                  B, offB, ldb, C, offC, ldc, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-}
diff --git a/src/library/blas/xgemm.cc b/src/library/blas/xgemm.cc
new file mode 100644
index 0000000..02c2073
--- /dev/null
+++ b/src/library/blas/xgemm.cc
@@ -0,0 +1,328 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#include <string.h>
+#include <clBLAS.h>
+
+#include <functor.h>
+#include <functor_selector.h>
+
+// Transform a gemm in clblasRowMajor into a gemm in clblasColumnMajor:
+//
+// The idea is basically that
+//   C = A*B + C
+// can be computed as 
+//   C' = (A*B + C)'
+//      = B'*A' + C'
+// And since changing the order is basically a transpose on each matrix,
+// the formula becomes with the new order
+//   C = B*A + C
+//
+// When enabled, only the ColumnMajor kernels need to be implemented
+// for all GEMM 
+//
+
+#define FORCE_COLUMN_MAJOR 1
+
+#if FORCE_COLUMN_MAJOR
+template <typename Args>
+static void force_gemm_column_major(Args & args)
+{
+    if (args.order == clblasRowMajor)
+    {
+        std::swap(args.transA , args.transB);
+        std::swap(args.M      , args.N);
+        std::swap(args.offA   , args.offB);
+        std::swap(args.lda    , args.ldb);
+        std::swap(args.A      , args.B);
+        args.order = clblasColumnMajor;
+    }
+}
+#endif
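To make the swap concrete, a small illustration using a toy argument bundle with only the fields the template touches (the real clblasSgemmFunctor::Args lives in the functor headers): a row-major C(2x3) = A(2x4) * B(4x3) becomes a column-major GEMM whose first operand is B.

    #include <cassert>
    #include <clBLAS.h>

    // Toy stand-in for the Args bundle; only the fields touched above.
    struct ToyGemmArgs {
        clblasOrder order;
        clblasTranspose transA, transB;
        size_t M, N;
        size_t offA, offB;
        size_t lda, ldb;
        cl_mem A, B;
    };

    void demo_force_gemm_column_major()
    {
        ToyGemmArgs a = { clblasRowMajor, clblasNoTrans, clblasNoTrans,
                          /*M=*/2, /*N=*/3, /*offA=*/0, /*offB=*/0,
                          /*lda=*/4, /*ldb=*/3, /*A=*/NULL, /*B=*/NULL };
        force_gemm_column_major(a);
        // C' = B' * A': sizes, offsets, strides, operands and trans flags swap.
        assert(a.order == clblasColumnMajor);
        assert(a.M == 3 && a.N == 2);
        assert(a.lda == 3 && a.ldb == 4);
    }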
+
+//
+// This file provides the public clBLAS API for
+//
+//   clblasSgemm() 
+//   clblasDgemm() 
+//   clblasCgemm() 
+//   clblasZgemm() 
+//
+// using functors 
+// 
+// Potential optimizations: 
+//
+//  - Check the values of alpha, beta, M, N and K to 
+//    transform the gemm into an equivalent but cheaper 
+//    scal or gemv where possible.
+//
+//  - Get rid of the 'order' argument assuming that 
+//    row-major is equivalent to the transpose of column-major.
+//    That is  
+//
+//       C  = alpha * A * B + beta * C 
+//
+//    is equivalent to 
+//
+//       C' = alpha * B' * A' + beta * C'  
+//
+//    and, when considering the opposite order, is equivalent to   
+//
+//       C  = alpha * B * A + beta * C  
+//
+//    By applying that transformation early, the functors implementing 
+//    the GEMMs only have to consider one of the two cases. 
+//
+
+
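As an illustration of the first point (not implemented here): when K is 0 or alpha is 0 the product term vanishes and the call degenerates to a scaling of C, and when N is 1 it is a matrix-vector product. A hypothetical early-out sketch:

    #include <cstddef>

    // Hypothetical check along the lines suggested above (detection only).
    bool is_degenerate_sgemm(size_t M, size_t N, size_t K,
                             float alpha, float beta)
    {
        (void)M; (void)beta;
        if (K == 0 || alpha == 0.0f)
            return true;   // C = beta * C : a scal over the entries of C suffices
        if (N == 1)
            return true;   // single output column: a gemv on op(A) suffices
        return false;      // fall through to the full GEMM functor
    }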
+extern "C" 
+clblasStatus 
+clblasSgemm( clblasOrder order,
+             clblasTranspose transA,
+             clblasTranspose transB,
+             size_t M, size_t N, size_t K,
+             cl_float alpha,
+             const cl_mem A, size_t offA, size_t lda,
+             const cl_mem B, size_t offB, size_t ldb,
+             cl_float beta,
+             cl_mem C, size_t offC,  size_t ldc,
+             cl_uint numCommandQueues,
+             cl_command_queue *commandQueues,
+             cl_uint numEventsInWaitList,
+             const cl_event *eventWaitList,
+             cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+   CHECK_MATRIX_A(TYPE_FLOAT, order, transA, A, M, K, offA, lda);
+   CHECK_MATRIX_B(TYPE_FLOAT, order, transB, B, K, N, offB, ldb);
+   CHECK_MATRIX_C(TYPE_FLOAT, order, clblasNoTrans, C, M, N, offC, ldc);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasSgemmFunctor::Args args(order,
+                                 transA,
+                                 transB,
+                                 M, N, K,
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb,
+                                 beta,
+                                 C, offC, ldc,
+                                 queue,
+                                 numEventsInWaitList,
+                                 eventWaitList,
+                                 events);
+
+#if FORCE_COLUMN_MAJOR
+   force_gemm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasSgemmFunctor * functor = fselector->select_sgemm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
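For reference, a minimal host-side sketch of driving this entry point; error checking is omitted and a valid cl_context / cl_command_queue pair is assumed to exist already:

    #include <vector>
    #include <clBLAS.h>

    // Compute C = alpha*A*B + beta*C for small column-major matrices.
    void small_sgemm(cl_context ctx, cl_command_queue queue)
    {
        const size_t M = 4, N = 4, K = 4;
        std::vector<float> A(M * K, 1.0f), B(K * N, 2.0f), C(M * N, 0.0f);

        clblasSetup();                       // initialize the library once per process

        cl_int err;
        cl_mem dA = clCreateBuffer(ctx, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                   A.size() * sizeof(float), A.data(), &err);
        cl_mem dB = clCreateBuffer(ctx, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                   B.size() * sizeof(float), B.data(), &err);
        cl_mem dC = clCreateBuffer(ctx, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                                   C.size() * sizeof(float), C.data(), &err);

        cl_event done;
        clblasSgemm(clblasColumnMajor, clblasNoTrans, clblasNoTrans,
                    M, N, K, 1.0f,
                    dA, 0, /*lda=*/M,
                    dB, 0, /*ldb=*/K,
                    0.0f,
                    dC, 0, /*ldc=*/M,
                    1, &queue, 0, NULL, &done);
        clWaitForEvents(1, &done);
        clReleaseEvent(done);

        clEnqueueReadBuffer(queue, dC, CL_TRUE, 0, C.size() * sizeof(float),
                            C.data(), 0, NULL, NULL);

        clReleaseMemObject(dA);
        clReleaseMemObject(dB);
        clReleaseMemObject(dC);
        clblasTeardown();
    }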
+extern "C" 
+clblasStatus
+clblasDgemm( clblasOrder order,
+             clblasTranspose transA,
+             clblasTranspose transB,
+             size_t M, size_t N,  size_t K,
+             cl_double alpha,
+             const cl_mem A, size_t offA, size_t lda,
+             const cl_mem B, size_t offB, size_t ldb,
+             cl_double beta,
+             cl_mem C, size_t offC, size_t ldc,
+             cl_uint numCommandQueues,
+             cl_command_queue *commandQueues,
+             cl_uint numEventsInWaitList,
+             const cl_event *eventWaitList,
+             cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+   CHECK_MATRIX_A(TYPE_DOUBLE, order, transA, A, M, K, offA, lda);
+   CHECK_MATRIX_B(TYPE_DOUBLE, order, transB, B, K, N, offB, ldb);
+   CHECK_MATRIX_C(TYPE_DOUBLE, order, clblasNoTrans, C, M, N, offC, ldc);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasDgemmFunctor::Args args(order,
+                                 transA,
+                                 transB,
+                                 M, N, K,
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb,
+                                 beta,
+                                 C, offC, ldc,
+                                 queue,
+                                 numEventsInWaitList,
+                                 eventWaitList,
+                                 events);
+
+#if FORCE_COLUMN_MAJOR
+   force_gemm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasDgemmFunctor * functor = fselector->select_dgemm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
+extern "C" 
+clblasStatus
+clblasCgemm(
+    clblasOrder order,
+    clblasTranspose transA,
+    clblasTranspose transB,
+    size_t M, size_t N, size_t K,
+    FloatComplex alpha,
+    const cl_mem A, size_t offA, size_t lda,
+    const cl_mem B, size_t offB, size_t ldb,
+    FloatComplex beta,
+    cl_mem C, size_t offC, size_t ldc,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+   CHECK_MATRIX_A(TYPE_COMPLEX_FLOAT, order, transA, A, M, K, offA, lda);
+   CHECK_MATRIX_B(TYPE_COMPLEX_FLOAT, order, transB, B, K, N, offB, ldb);
+   CHECK_MATRIX_C(TYPE_COMPLEX_FLOAT, order, clblasNoTrans, C, M, N, offC, ldc);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasCgemmFunctor::Args args(order,
+                                 transA,
+                                 transB,
+                                 M, N, K,
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb,
+                                 beta,
+                                 C, offC, ldc,
+                                 queue,
+                                 numEventsInWaitList,
+                                 eventWaitList,
+                                 events);
+
+#if FORCE_COLUMN_MAJOR
+   force_gemm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasCgemmFunctor * functor = fselector->select_cgemm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
+extern "C" 
+clblasStatus
+clblasZgemm(
+    clblasOrder order,
+    clblasTranspose transA,
+    clblasTranspose transB,
+    size_t M, size_t N, size_t K,
+    DoubleComplex alpha,
+    const cl_mem A, size_t offA, size_t lda,
+    const cl_mem B, size_t offB, size_t ldb,
+    DoubleComplex beta,
+    cl_mem C, size_t offC, size_t ldc,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+   CHECK_MATRIX_A(TYPE_COMPLEX_DOUBLE, order, transA, A, M, K, offA, lda);
+   CHECK_MATRIX_B(TYPE_COMPLEX_DOUBLE, order, transB, B, K, N, offB, ldb);
+   CHECK_MATRIX_C(TYPE_COMPLEX_DOUBLE, order, clblasNoTrans, C, M, N, offC, ldc);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasZgemmFunctor::Args args(order,
+                                 transA,
+                                 transB,
+                                 M, N, K,
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb,
+                                 beta,
+                                 C, offC, ldc,
+                                 queue,
+                                 numEventsInWaitList,
+                                 eventWaitList,
+                                 events);
+
+#if FORCE_COLUMN_MAJOR
+   force_gemm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasZgemmFunctor * functor = fselector->select_zgemm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
diff --git a/src/library/blas/xscal.cc b/src/library/blas/xscal.cc
new file mode 100644
index 0000000..102973d
--- /dev/null
+++ b/src/library/blas/xscal.cc
@@ -0,0 +1,340 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#include <stdio.h>
+#include <string.h>
+#include <clBLAS.h>
+
+#include <functor.h>
+#include <functor_selector.h>
+
+//
+// This file provides the functor-based public clBLAS API for
+//
+//   clblasSscal() 
+//   clblasDscal() 
+//   clblasCscal() 
+//   clblasZscal() 
+//   clblasCsscal() 
+//   clblasZdscal() 
+//
+
+
+extern "C" 
+clblasStatus
+clblasSscal(
+    size_t N,
+    float alpha,
+    cl_mem X,
+    size_t offx,
+    int incx,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  CHECK_QUEUES(numCommandQueues, commandQueues);
+  CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+  CHECK_VECTOR_X(TYPE_FLOAT, N, X, offx, incx);
+
+  clblasSscalFunctor * functor ;
+
+  if ( numCommandQueues>1 ) 
+  {
+    numCommandQueues = 1 ;  // No support for multi-device (yet)
+  }
+
+  cl_command_queue queue = commandQueues[0]; 
+
+
+  clblasSscalFunctor::Args args(N,
+                                alpha,
+                                X,
+                                offx,
+                                incx,
+                                queue,
+                                numEventsInWaitList,
+                                eventWaitList,
+                                events);
+  
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+   
+   functor = fselector->select_sscal_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
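A correspondingly small usage sketch for the scal entry points, again assuming clblasSetup() has been called and a valid context and queue exist:

    #include <vector>
    #include <clBLAS.h>

    // Scale N floats in place on the device: X = alpha * X.
    void scale_vector(cl_context ctx, cl_command_queue queue)
    {
        const size_t N = 1024;
        std::vector<float> X(N, 1.0f);

        cl_int err;
        cl_mem dX = clCreateBuffer(ctx, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                                   N * sizeof(float), X.data(), &err);

        cl_event done;
        clblasSscal(N, 2.5f, dX, /*offx=*/0, /*incx=*/1,
                    1, &queue, 0, NULL, &done);
        clWaitForEvents(1, &done);
        clReleaseEvent(done);

        clEnqueueReadBuffer(queue, dX, CL_TRUE, 0, N * sizeof(float),
                            X.data(), 0, NULL, NULL);
        clReleaseMemObject(dX);
    }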
+
+
+extern "C" 
+clblasStatus
+clblasDscal(
+    size_t N,
+    double alpha,
+    cl_mem X,
+    size_t offx,
+    int incx,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  CHECK_QUEUES(numCommandQueues, commandQueues);
+  CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+  CHECK_VECTOR_X(TYPE_DOUBLE, N, X, offx, incx);
+
+  clblasDscalFunctor * functor ;
+
+  if ( numCommandQueues>1 ) 
+  {
+    numCommandQueues = 1 ;  // No support for multi-device (yet)
+  }
+
+  cl_command_queue queue = commandQueues[0]; 
+
+
+  clblasDscalFunctor::Args args(N,
+                                alpha,
+                                X,
+                                offx,
+                                incx,
+                                queue,
+                                numEventsInWaitList,
+                                eventWaitList,
+                                events);
+  
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+   
+   functor = fselector->select_dscal_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+	
+}
+
+
+
+
+
+extern "C" 
+clblasStatus
+clblasCscal(
+    size_t N,
+    cl_float2 alpha,
+    cl_mem X,
+    size_t offx,
+    int incx,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  CHECK_QUEUES(numCommandQueues, commandQueues);
+  CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+  CHECK_VECTOR_X(TYPE_COMPLEX_FLOAT, N, X, offx, incx);
+
+  clblasCscalFunctor * functor ;
+
+  if ( numCommandQueues>1 ) 
+  {
+    numCommandQueues = 1 ;  // No support for multi-device (yet)
+  }
+
+  cl_command_queue queue = commandQueues[0]; 
+
+
+  clblasCscalFunctor::Args args(N,
+                                alpha,
+                                X,
+                                offx,
+                                incx,
+                                queue,
+                                numEventsInWaitList,
+                                eventWaitList,
+                                events);
+  
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+   
+   functor = fselector->select_cscal_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
+
+extern "C" 
+clblasStatus
+clblasZscal(
+    size_t N,
+    cl_double2 alpha,
+    cl_mem X,
+    size_t offx,
+    int incx,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  CHECK_QUEUES(numCommandQueues, commandQueues);
+  CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+  CHECK_VECTOR_X(TYPE_COMPLEX_DOUBLE, N, X, offx, incx);
+
+  clblasZscalFunctor * functor ;
+
+  if ( numCommandQueues>1 ) 
+  {
+    numCommandQueues = 1 ;  // No support for multi-device (yet)
+  }
+
+  cl_command_queue queue = commandQueues[0]; 
+
+
+  clblasZscalFunctor::Args args(N,
+                                alpha,
+                                X,
+                                offx,
+                                incx,
+                                queue,
+                                numEventsInWaitList,
+                                eventWaitList,
+                                events);
+  
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+   
+   functor = fselector->select_zscal_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;	
+}
+
+
+extern "C" 
+clblasStatus
+clblasCsscal(
+    size_t N,
+    float alpha,
+    cl_mem X,
+    size_t offx,
+    int incx,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  CHECK_QUEUES(numCommandQueues, commandQueues);
+  CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+  CHECK_VECTOR_X(TYPE_COMPLEX_FLOAT, N, X, offx, incx);
+
+  clblasCsscalFunctor * functor ;
+  
+  if ( numCommandQueues>1 ) 
+  {
+    numCommandQueues = 1 ;  // No support for multi-device (yet)
+  }
+
+  cl_command_queue queue = commandQueues[0]; 
+
+
+  clblasCsscalFunctor::Args args(N,
+                                alpha,
+                                X,
+                                offx,
+                                incx,
+                                queue,
+                                numEventsInWaitList,
+                                eventWaitList,
+                                events);
+  
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+   
+   functor = fselector->select_csscal_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
+
+extern "C" 
+clblasStatus
+clblasZdscal(
+    size_t N,
+    double alpha,
+    cl_mem X,
+    size_t offx,
+    int incx,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+  CHECK_QUEUES(numCommandQueues, commandQueues);
+  CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+  CHECK_VECTOR_X(TYPE_COMPLEX_DOUBLE, N, X, offx, incx);
+
+  clblasZdscalFunctor * functor ;
+
+  if ( numCommandQueues>1 ) 
+  {
+    numCommandQueues = 1 ;  // No support for multi-device (yet)
+  }
+
+  cl_command_queue queue = commandQueues[0]; 
+
+
+  clblasZdscalFunctor::Args args(N,
+                                alpha,
+                                X,
+                                offx,
+                                incx,
+                                queue,
+                                numEventsInWaitList,
+                                eventWaitList,
+                                events);
+  
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+   
+   functor = fselector->select_zdscal_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;	
+}
+
diff --git a/src/library/blas/xtrsm.c b/src/library/blas/xtrsm.c
deleted file mode 100644
index d2fd7f0..0000000
--- a/src/library/blas/xtrsm.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/* ************************************************************************
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ************************************************************************/
-
-
-#include <string.h>
-#include <clBLAS.h>
-
-#include <devinfo.h>
-#include "clblas-internal.h"
-#include "solution_seq.h"
-
-static clblasStatus
-doTrsm(
-    CLBlasKargs *kargs,
-    clblasOrder order,
-    clblasSide side,
-    clblasUplo uplo,
-    clblasTranspose transA,
-    clblasDiag diag,
-    size_t M,
-    size_t N,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    cl_int err;
-    ListHead seq;
-    size_t msize;
-    clblasStatus retCode = clblasSuccess;
-
-    if (!clblasInitialized) {
-        return clblasNotInitialized;
-    }
-
-    /* Validate arguments */
-
-    if ((retCode = checkMemObjects(A, B, NULL, false, A_MAT_ERRSET, B_MAT_ERRSET, END_ERRSET ))) {
-        return retCode;
-    }
-    msize = (side == clblasLeft) ? M : N;
-
-    if ((retCode = checkMatrixSizes(kargs->dtype, order, transA, msize, msize,
-                                    A, offA, lda, A_MAT_ERRSET ))) {
-        return retCode;
-    }
-    if ((retCode = checkMatrixSizes(kargs->dtype, order, clblasNoTrans, M, N,
-                                    B, offB, ldb, B_MAT_ERRSET ))) {
-        return retCode;
-    }
-
-    kargs->order = order;
-    kargs->side = side;
-    kargs->uplo = uplo;
-    kargs->transA = transA;
-    kargs->diag = diag;
-    kargs->M = M;
-    kargs->N = N;
-    kargs->A = A;
-    kargs->offA = offA;
-    kargs->lda.matrix = lda;
-    kargs->B = B;
-    kargs->offBX = offB;
-    kargs->ldb.matrix = ldb;
-    // Store the original problem size in K; it is needed when the result is
-    // computed in parts, with M or N used as the part size.
-    if (side == clblasLeft) {
-        kargs->K = M;
-    }
-    else {
-        kargs->K = N;
-    }
-
-    kargs->offsetM = 0;
-    kargs->offsetN = 0;
-    kargs->scimage[0] = 0;
-
-#ifndef TRXM_MULTIPLE_QUEUES
-    if (numCommandQueues != 0) {
-        numCommandQueues = 1;
-    }
-#endif
-
-    listInitHead(&seq);
-    err = makeSolutionSeq(CLBLAS_TRSM, kargs, numCommandQueues, commandQueues,
-        numEventsInWaitList, eventWaitList, events, &seq);
-    if (err == CL_SUCCESS) {
-        err = executeSolutionSeq(&seq);
-    }
-
-    freeSolutionSeq(&seq);
-
-    return (clblasStatus)err;
-}
-
-clblasStatus
-clblasStrsm(
-    clblasOrder order,
-    clblasSide side,
-    clblasUplo uplo,
-    clblasTranspose transA,
-    clblasDiag diag,
-    size_t M,
-    size_t N,
-    cl_float alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_FLOAT;
-    kargs.alpha.argFloat = alpha;
-
-    return doTrsm(&kargs, order, side, uplo, transA, diag, M, N, A, offA, lda,
-                  B, offB, ldb, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-}
-
-clblasStatus
-clblasDtrsm(
-    clblasOrder order,
-    clblasSide side,
-    clblasUplo uplo,
-    clblasTranspose transA,
-    clblasDiag diag,
-    size_t M,
-    size_t N,
-    cl_double alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_DOUBLE;
-    kargs.alpha.argDouble = alpha;
-
-    return doTrsm(&kargs, order, side, uplo, transA, diag, M, N, A, offA, lda,
-                  B, offB, ldb, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-}
-
-clblasStatus
-clblasCtrsm(
-    clblasOrder order,
-    clblasSide side,
-    clblasUplo uplo,
-    clblasTranspose transA,
-    clblasDiag diag,
-    size_t M,
-    size_t N,
-    FloatComplex alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_COMPLEX_FLOAT;
-    kargs.alpha.argFloatComplex = alpha;
-    kargs.offA = offA;
-    kargs.offBX = offB;
-
-    return doTrsm(&kargs, order, side, uplo, transA, diag, M, N, A, offA, lda,
-                  B, offB, ldb, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-}
-
-clblasStatus
-clblasZtrsm(
-    clblasOrder order,
-    clblasSide side,
-    clblasUplo uplo,
-    clblasTranspose transA,
-    clblasDiag diag,
-    size_t M,
-    size_t N,
-    DoubleComplex alpha,
-    const cl_mem A,
-    size_t offA,
-    size_t lda,
-    cl_mem B,
-    size_t offB,
-    size_t ldb,
-    cl_uint numCommandQueues,
-    cl_command_queue *commandQueues,
-    cl_uint numEventsInWaitList,
-    const cl_event *eventWaitList,
-    cl_event *events)
-{
-    CLBlasKargs kargs;
-
-    memset(&kargs, 0, sizeof(kargs));
-    kargs.dtype = TYPE_COMPLEX_DOUBLE;
-    kargs.alpha.argDoubleComplex = alpha;
-
-    return doTrsm(&kargs, order, side, uplo, transA, diag, M, N, A, offA, lda,
-                  B, offB, ldb, numCommandQueues, commandQueues,
-                  numEventsInWaitList, eventWaitList, events);
-}
-
diff --git a/src/library/blas/xtrsm.cc b/src/library/blas/xtrsm.cc
new file mode 100644
index 0000000..288dbb1
--- /dev/null
+++ b/src/library/blas/xtrsm.cc
@@ -0,0 +1,333 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#include <string.h>
+#include <clBLAS.h>
+
+#include <functor.h>
+#include <functor_selector.h>
+
+
+// Transform a trsm in clblasRowMajor into a trsm in clblasColumnMajor:
+//
+// The idea is basically that
+//   B = A*X
+// can be computed as 
+//   B' = (A*X)'
+//      = X'*A'
+// And since changing the order is basically a transpose on each matrix,
+// the formula becomes with the new order
+//   B = X*A (so only the side and the uplo must be changed, and the M and N sizes swapped)
+//
+// When enabled, only the ColumnMajor kernels need to be implemented
+// for all TRSM 
+//
+
+#define FORCE_COLUMN_MAJOR 1
+
+#if FORCE_COLUMN_MAJOR
+template <typename Args>
+static void force_trsm_column_major(Args & args)
+{
+    if (args.order == clblasRowMajor)
+    {
+      std::swap(args.M, args.N);
+      args.side = (args.side == clblasLeft ) ? clblasRight : clblasLeft  ;
+      args.uplo = (args.uplo == clblasUpper) ? clblasLower : clblasUpper ;
+      args.order = clblasColumnMajor;
+    }
+}
+#endif
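As with GEMM, a small illustration with a toy argument bundle holding only the fields the template touches (the real clblasStrsmFunctor::Args lives in the functor headers):

    #include <cassert>
    #include <clBLAS.h>

    // Toy stand-in for the Args bundle; only the fields touched above.
    struct ToyTrsmArgs {
        clblasOrder order;
        clblasSide side;
        clblasUplo uplo;
        size_t M, N;
    };

    void demo_force_trsm_column_major()
    {
        // Row-major, left-sided, upper-triangular solve with a 2x3 right-hand side.
        ToyTrsmArgs a = { clblasRowMajor, clblasLeft, clblasUpper, 2, 3 };
        force_trsm_column_major(a);
        // In column-major terms the triangle moves to the right side and flips
        // to lower, and the B dimensions swap.
        assert(a.order == clblasColumnMajor);
        assert(a.side == clblasRight && a.uplo == clblasLower);
        assert(a.M == 3 && a.N == 2);
    }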
+
+//
+// This file provides the public clBLAS API for
+//
+//   clblasStrsm() 
+//   clblasDtrsm() 
+//   clblasCtrsm() 
+//   clblasZtrsm() 
+//
+// using functors 
+// 
+// Potential optimizations: 
+//
+//  - Get rid of the 'order' argument assuming that 
+//    row-major is equivalent to the transpose of column-major.
+//    That is  
+//
+//       B = A*X
+//
+//    is equivalent to 
+//
+//       B' = X'*A'
+//
+//    and, when considering the opposite order, is equivalent to   
+//
+//       B = X*A (with A swap between upper and lower)
+//
+//    By applying that transformation early, the functors implementing 
+//    the TRSMs only have to consider one of the two cases. 
+//
+
+
+extern "C" 
+clblasStatus 
+clblasStrsm(
+    clblasOrder order,
+    clblasSide side,
+    clblasUplo uplo,
+    clblasTranspose transA,
+    clblasDiag diag,
+    size_t M,
+    size_t N,
+    cl_float alpha,
+    const cl_mem A,
+    size_t offA,
+    size_t lda,
+    cl_mem B,
+    size_t offB,
+    size_t ldb,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasStrsmFunctor::Args args(order, 
+                                 side, 
+                                 uplo, 
+                                 transA, 
+                                 diag, 
+                                 M, N, 
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb, 
+                                 queue,
+                                 numEventsInWaitList, 
+                                 eventWaitList, 
+                                 events);
+
+
+#if FORCE_COLUMN_MAJOR
+   force_trsm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasStrsmFunctor * functor = fselector->select_strsm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
+extern "C" 
+clblasStatus 
+clblasDtrsm(
+    clblasOrder order,
+    clblasSide side,
+    clblasUplo uplo,
+    clblasTranspose transA,
+    clblasDiag diag,
+    size_t M,
+    size_t N,
+    cl_double alpha,
+    const cl_mem A,
+    size_t offA,
+    size_t lda,
+    cl_mem B,
+    size_t offB,
+    size_t ldb,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasDtrsmFunctor::Args args(order, 
+                                 side, 
+                                 uplo, 
+                                 transA, 
+                                 diag, 
+                                 M, N, 
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb, 
+                                 queue,
+                                 numEventsInWaitList, 
+                                 eventWaitList, 
+                                 events);
+
+
+#if FORCE_COLUMN_MAJOR
+   force_trsm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasDtrsmFunctor * functor = fselector->select_dtrsm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
+extern "C" 
+clblasStatus 
+clblasCtrsm(
+    clblasOrder order,
+    clblasSide side,
+    clblasUplo uplo,
+    clblasTranspose transA,
+    clblasDiag diag,
+    size_t M,
+    size_t N,
+    FloatComplex alpha,
+    const cl_mem A,
+    size_t offA,
+    size_t lda,
+    cl_mem B,
+    size_t offB,
+    size_t ldb,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasCtrsmFunctor::Args args(order, 
+                                 side, 
+                                 uplo, 
+                                 transA, 
+                                 diag, 
+                                 M, N, 
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb, 
+                                 queue,
+                                 numEventsInWaitList, 
+                                 eventWaitList, 
+                                 events);
+
+
+#if FORCE_COLUMN_MAJOR
+   force_trsm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasCtrsmFunctor * functor = fselector->select_ctrsm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
+
+extern "C" 
+clblasStatus 
+clblasZtrsm(
+    clblasOrder order,
+    clblasSide side,
+    clblasUplo uplo,
+    clblasTranspose transA,
+    clblasDiag diag,
+    size_t M,
+    size_t N,
+    DoubleComplex alpha,
+    const cl_mem A,
+    size_t offA,
+    size_t lda,
+    cl_mem B,
+    size_t offB,
+    size_t ldb,
+    cl_uint numCommandQueues,
+    cl_command_queue *commandQueues,
+    cl_uint numEventsInWaitList,
+    const cl_event *eventWaitList,
+    cl_event *events)
+{
+   CHECK_QUEUES(numCommandQueues, commandQueues);
+   CHECK_EVENTS(numEventsInWaitList, eventWaitList);
+
+   if ( numCommandQueues>1 ) 
+   {
+       numCommandQueues = 1 ;  // No support for multi-device (yet)
+   }
+
+   cl_command_queue queue = commandQueues[0]; 
+
+   clblasZtrsmFunctor::Args args(order, 
+                                 side, 
+                                 uplo, 
+                                 transA, 
+                                 diag, 
+                                 M, N, 
+                                 alpha,
+                                 A, offA, lda,
+                                 B, offB, ldb, 
+                                 queue,
+                                 numEventsInWaitList, 
+                                 eventWaitList, 
+                                 events);
+
+
+#if FORCE_COLUMN_MAJOR
+   force_trsm_column_major(args);
+#endif
+
+   clblasFunctorSelector  * fselector = clblasFunctorSelector::find(queue);
+
+   clblasZtrsmFunctor * functor = fselector->select_ztrsm_specific(args);
+
+   clblasStatus res = functor->execute(args);
+
+   functor->release();
+
+   return res;
+}
diff --git a/src/library/common/devinfo.c b/src/library/common/devinfo.c
index dc37426..8607ce9 100644
--- a/src/library/common/devinfo.c
+++ b/src/library/common/devinfo.c
@@ -68,6 +68,12 @@ stringToChip(const char *str)
     else if (!strcmp(str, "Tahiti")) {
         chip = TAHITI;
     }
+    else if (!strcmp(str, "Hawaii")) {
+        chip = HAWAII;
+    }
+    else if (!strcmp(str, "Bonaire")) {
+        chip = BONAIRE;
+    }
     else if (!strcmp(str, "GeForce GTX 480")) {
         chip = GEFORCE_GTX_480;
     }
diff --git a/src/library/common/md5sum.c b/src/library/common/md5sum.c
new file mode 100644
index 0000000..075ff85
--- /dev/null
+++ b/src/library/common/md5sum.c
@@ -0,0 +1,378 @@
+/*
+ * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
+ * MD5 Message-Digest Algorithm (RFC 1321).
+ *
+ * Homepage:
+ * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
+ *
+ * Author:
+ * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
+ *
+ * This software was written by Alexander Peslyak in 2001.  No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * There's ABSOLUTELY NO WARRANTY, express or implied.
+ *
+ * (This is a heavily cut-down "BSD license".)
+ *
+ * This differs from Colin Plumb's older public domain implementation in that
+ * no exactly 32-bit integer data type is required (any 32-bit or wider
+ * unsigned integer data type will do), there's no compile-time endianness
+ * configuration, and the function prototypes match OpenSSL's.  No code from
+ * Colin Plumb's implementation has been reused; this comment merely compares
+ * the properties of the two independent implementations.
+ *
+ * The primary goals of this implementation are portability and ease of use.
+ * It is meant to be fast, but not as fast as possible.  Some known
+ * optimizations are not included to reduce source code size and avoid
+ * compile-time configuration.
+ */
+ 
+#ifndef HAVE_OPENSSL
+ 
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+ 
+#include <md5sum.h>
+ 
+/*
+ * The basic MD5 functions.
+ *
+ * F and G are optimized compared to their RFC 1321 definitions for
+ * architectures that lack an AND-NOT instruction, just like in Colin Plumb's
+ * implementation.
+ */
+#define F(x, y, z)			((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z)			((y) ^ ((z) & ((x) ^ (y))))
+#define H(x, y, z)			(((x) ^ (y)) ^ (z))
+#define H2(x, y, z)			((x) ^ ((y) ^ (z)))
+#define I(x, y, z)			((y) ^ ((x) | ~(z)))
+ 
+/*
+ * The MD5 transformation for all four rounds.
+ */
+#define STEP(f, a, b, c, d, x, t, s) \
+	(a) += f((b), (c), (d)) + (x) + (t); \
+	(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
+	(a) += (b);
+ 
+/*
+ * SET reads 4 input bytes in little-endian byte order and stores them
+ * in a properly aligned word in host byte order.
+ *
+ * The check for little-endian architectures that tolerate unaligned
+ * memory accesses is just an optimization.  Nothing will break if it
+ * doesn't work.
+ */
+#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
+#define SET(n) \
+	(*(MD5_u32plus *)&ptr[(n) * 4])
+#define GET(n) \
+	SET(n)
+#else
+#define SET(n) \
+	(ctx->block[(n)] = \
+	(MD5_u32plus)ptr[(n) * 4] | \
+	((MD5_u32plus)ptr[(n) * 4 + 1] << 8) | \
+	((MD5_u32plus)ptr[(n) * 4 + 2] << 16) | \
+	((MD5_u32plus)ptr[(n) * 4 + 3] << 24))
+#define GET(n) \
+	(ctx->block[(n)])
+#endif
+ 
+/*
+ * This processes one or more 64-byte data blocks, but does NOT update
+ * the bit counters.  There are no alignment requirements.
+ */
+static const void *body(MD5_CTX *ctx, const void *data, unsigned long size)
+{
+	const unsigned char *ptr;
+	MD5_u32plus a, b, c, d;
+	MD5_u32plus saved_a, saved_b, saved_c, saved_d;
+ 
+	ptr = (const unsigned char *)data;
+ 
+	a = ctx->a;
+	b = ctx->b;
+	c = ctx->c;
+	d = ctx->d;
+ 
+	do {
+		saved_a = a;
+		saved_b = b;
+		saved_c = c;
+		saved_d = d;
+ 
+/* Round 1 */
+		STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
+		STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
+		STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
+		STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
+		STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
+		STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
+		STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
+		STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
+		STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
+		STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
+		STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
+		STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
+		STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
+		STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
+		STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
+		STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
+ 
+/* Round 2 */
+		STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
+		STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
+		STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
+		STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
+		STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
+		STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
+		STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
+		STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
+		STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
+		STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
+		STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
+		STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
+		STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
+		STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
+		STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
+		STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
+ 
+/* Round 3 */
+		STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
+		STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
+		STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
+		STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
+		STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
+		STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
+		STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
+		STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
+		STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
+		STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
+		STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
+		STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
+		STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
+		STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
+		STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
+		STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
+ 
+/* Round 4 */
+		STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
+		STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
+		STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
+		STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
+		STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
+		STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
+		STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
+		STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
+		STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
+		STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
+		STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
+		STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
+		STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
+		STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
+		STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
+		STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
+ 
+		a += saved_a;
+		b += saved_b;
+		c += saved_c;
+		d += saved_d;
+ 
+		ptr += 64;
+	} while (size -= 64);
+ 
+	ctx->a = a;
+	ctx->b = b;
+	ctx->c = c;
+	ctx->d = d;
+ 
+	return ptr;
+}
+ 
+void MD5_Init(MD5_CTX *ctx)
+{
+	ctx->a = 0x67452301;
+	ctx->b = 0xefcdab89;
+	ctx->c = 0x98badcfe;
+	ctx->d = 0x10325476;
+ 
+	ctx->lo = 0;
+	ctx->hi = 0;
+}
+ 
+void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size)
+{
+	MD5_u32plus saved_lo;
+	unsigned long used, available;
+ 
+	saved_lo = ctx->lo;
+	if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
+		ctx->hi++;
+	ctx->hi += size >> 29;
+ 
+	used = saved_lo & 0x3f;
+ 
+	if (used) {
+		available = 64 - used;
+ 
+		if (size < available) {
+			memcpy(&ctx->buffer[used], data, size);
+			return;
+		}
+ 
+		memcpy(&ctx->buffer[used], data, available);
+		data = (const unsigned char *)data + available;
+		size -= available;
+		body(ctx, ctx->buffer, 64);
+	}
+ 
+	if (size >= 64) {
+		data = body(ctx, data, size & ~(unsigned long)0x3f);
+		size &= 0x3f;
+	}
+ 
+	memcpy(ctx->buffer, data, size);
+}
+ 
+void MD5_Final(unsigned char *result, MD5_CTX *ctx)
+{
+	unsigned long used, available;
+ 
+	used = ctx->lo & 0x3f;
+ 
+	ctx->buffer[used++] = 0x80;
+ 
+	available = 64 - used;
+ 
+	if (available < 8) {
+		memset(&ctx->buffer[used], 0, available);
+		body(ctx, ctx->buffer, 64);
+		used = 0;
+		available = 64;
+	}
+ 
+	memset(&ctx->buffer[used], 0, available - 8);
+ 
+	ctx->lo <<= 3;
+	ctx->buffer[56] = ctx->lo;
+	ctx->buffer[57] = ctx->lo >> 8;
+	ctx->buffer[58] = ctx->lo >> 16;
+	ctx->buffer[59] = ctx->lo >> 24;
+	ctx->buffer[60] = ctx->hi;
+	ctx->buffer[61] = ctx->hi >> 8;
+	ctx->buffer[62] = ctx->hi >> 16;
+	ctx->buffer[63] = ctx->hi >> 24;
+ 
+	body(ctx, ctx->buffer, 64);
+ 
+	result[0] = ctx->a;
+	result[1] = ctx->a >> 8;
+	result[2] = ctx->a >> 16;
+	result[3] = ctx->a >> 24;
+	result[4] = ctx->b;
+	result[5] = ctx->b >> 8;
+	result[6] = ctx->b >> 16;
+	result[7] = ctx->b >> 24;
+	result[8] = ctx->c;
+	result[9] = ctx->c >> 8;
+	result[10] = ctx->c >> 16;
+	result[11] = ctx->c >> 24;
+	result[12] = ctx->d;
+	result[13] = ctx->d >> 8;
+	result[14] = ctx->d >> 16;
+	result[15] = ctx->d >> 24;
+ 
+	memset(ctx, 0, sizeof(*ctx));
+}
+ 
+
+char * md5sum(const void * data, unsigned long size)
+{
+    unsigned char digest[16];
+    int i;
+    char * md5string = (char*)malloc(33*sizeof(char));
+    MD5_CTX context;
+    MD5_Init(&context);
+    MD5_Update(&context, data, size);
+    MD5_Final(digest, &context);
+
+    for(i = 0; i < 16; ++i)
+        sprintf(&md5string[i*2], "%02x", (unsigned int)digest[i]);
+
+    return (char*) md5string;
+}
+#endif
+
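md5sum() returns a heap-allocated, NUL-terminated 32-character hex digest that the caller must free; presumably it is used to fingerprint kernel sources, for example as keys for the on-disk binary cache. A minimal sketch:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    extern "C" char *md5sum(const void *data, unsigned long size);  // from md5sum.c

    int main()
    {
        const char *src = "__kernel void k(__global float *x) { x[0] = 1.0f; }";
        char *digest = md5sum(src, (unsigned long)std::strlen(src));
        std::printf("kernel fingerprint: %s\n", digest);  // 32 hex characters
        std::free(digest);                                // the caller owns the string
        return 0;
    }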
+/*
+ * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
+ * MD5 Message-Digest Algorithm (RFC 1321).
+ *
+ * Homepage:
+ * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
+ *
+ * Author:
+ * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
+ *
+ * This software was written by Alexander Peslyak in 2001.  No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * There's ABSOLUTELY NO WARRANTY, express or implied.
+ *
+ * See md5.c for more information.
+ */
+ 
+#ifdef HAVE_OPENSSL
+#include <openssl/md5.h>
+#elif !defined(_MD5_H)
+#define _MD5_H
+ 
+/* Any 32-bit or wider unsigned integer data type will do */
+typedef unsigned int MD5_u32plus;
+ 
+typedef struct {
+	MD5_u32plus lo, hi;
+	MD5_u32plus a, b, c, d;
+	unsigned char buffer[64];
+	MD5_u32plus block[16];
+} MD5_CTX;
+ 
+extern void MD5_Init(MD5_CTX *ctx);
+extern void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size);
+extern void MD5_Final(unsigned char *result, MD5_CTX *ctx);
+
+
+char * md5sum(const void * data, unsigned long size)
+{
+    unsigned char digest[16];
+    MD5_CTX context;
+    MD5_Init(&context);
+    MD5_Update(&context, data, size);
+    MD5_Final(digest, &context);
+
+    char * md5string = malloc(33*sizeof(char));
+    for(int i = 0; i < 16; ++i)
+        sprintf(&md5string[i*2], "%02x", (unsigned int)digest[i]);
+
+    return md5string;
+}
+ 
+#endif
+
+
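
A minimal sketch of how the md5sum() helper above can be used, for instance to derive a hex key from a kernel source string; the print_cache_key() wrapper is illustrative only and not part of this patch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* md5sum() is defined above; it returns a malloc'd 32-character
     * hex string, so the caller frees it. */
    extern char * md5sum(const void * data, unsigned long size);

    void print_cache_key(const char *kernel_source)
    {
        char *key = md5sum(kernel_source, (unsigned long)strlen(kernel_source));
        printf("cache key: %s\n", key);
        free(key);
    }
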
diff --git a/src/library/common/rwlock.c b/src/library/common/rwlock.c
new file mode 100644
index 0000000..d4341e2
--- /dev/null
+++ b/src/library/common/rwlock.c
@@ -0,0 +1,172 @@
+/* ************************************************************************
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ************************************************************************/
+
+
+#include <rwlock.h>
+
+#if defined(_MSC_VER)
+
+#pragma warning(push,3)
+// Need Synchapi.h since Windows 8 or Windows Server 2012?
+#include <windows.h>
+#pragma warning(pop)
+
+
+rwlock_t* 
+rwlockInit(void)
+{
+  PSRWLOCK rwlock;
+
+  rwlock = (PSRWLOCK)calloc(1, sizeof(SRWLOCK));
+  if (rwlock == NULL)
+      return NULL;
+
+  InitializeSRWLock(rwlock);
+
+  return (rwlock_t*) rwlock;
+}
+
+int 
+rwlockDestroy(rwlock_t *_rwlock)
+{
+  if (_rwlock == NULL) {
+    /* rwlock is invalid */
+    return 1;
+  }
+
+  free(_rwlock);
+
+  return 0;
+}
+
+int 
+rwlockReadLock(rwlock_t *_rwlock )
+{
+  BOOLEAN acquired = 0;
+  /* spin until the shared (read) lock can be taken */
+  while(!acquired)
+    acquired = TryAcquireSRWLockShared((PSRWLOCK) _rwlock);
+
+  /* return 0 on success to match the pthread implementation below */
+  return 0;
+}
+
+
+int 
+rwlockWriteLock(rwlock_t *_rwlock )
+{
+  BOOLEAN acquired = 0;
+  /* spin until the exclusive (write) lock can be taken */
+  while(!acquired)
+    acquired = TryAcquireSRWLockExclusive((PSRWLOCK) _rwlock);
+
+  /* return 0 on success to match the pthread implementation below */
+  return 0;
+}
+
+
+int 
+rwlockReadUnlock(rwlock_t *_rwlock )
+{
+  ReleaseSRWLockShared((PSRWLOCK) _rwlock);
+
+  return 0;
+}
+
+
+int 
+rwlockWriteUnlock(rwlock_t *_rwlock )
+{
+  ReleaseSRWLockExclusive((PSRWLOCK)_rwlock);
+
+  return 0;
+}
+
+
+
+#else /* defined(_MSC_VER) */
+
+#include <stdlib.h>
+#include <pthread.h>
+
+
+rwlock_t*
+rwlockInit(void)
+{
+    pthread_rwlock_t *rwlock;
+
+    rwlock = calloc(1, sizeof(pthread_rwlock_t));
+    if (rwlock == NULL)
+        return NULL;
+    if (pthread_rwlock_init(rwlock, NULL) != 0) {
+        free(rwlock);
+        return NULL;
+    }
+
+    return (rwlock_t*) rwlock;
+}
+
+int
+rwlockDestroy(rwlock_t *_rwlock)
+{
+    pthread_rwlock_t *rwlock = (pthread_rwlock_t*)_rwlock;
+
+    if (rwlock == NULL) {
+        /* rwlock is invalid */
+        return 1;
+    }
+    if (pthread_rwlock_destroy(rwlock) != 0) {
+        /* rwlock is busy or invalid */
+        return 1;
+    }
+
+    free(rwlock);
+    return 0;
+}
+
+
+int
+rwlockReadLock(rwlock_t *_rwlock )
+{
+    pthread_rwlock_t *rwlock = (pthread_rwlock_t*)_rwlock;
+
+    return (pthread_rwlock_rdlock(rwlock ) == 0) ? 0 : 1;
+}
+
+
+int
+rwlockWriteLock(rwlock_t *_rwlock ){
+
+    pthread_rwlock_t *rwlock = (pthread_rwlock_t*)_rwlock;
+    return (pthread_rwlock_wrlock(rwlock ) == 0) ? 0 : 1;
+
+}
+
+
+int
+rwlockReadUnlock(rwlock_t *_rwlock )
+{
+    pthread_rwlock_t *rwlock = (pthread_rwlock_t*)_rwlock;
+
+    return (pthread_rwlock_unlock(rwlock ) == 0) ? 0 : 1;
+}
+
+
+int
+rwlockWriteUnlock(rwlock_t *_rwlock )
+{
+    pthread_rwlock_t *rwlock = (pthread_rwlock_t*)_rwlock;
+
+    return (pthread_rwlock_unlock(rwlock ) == 0) ? 0 : 1;
+}
+
+#endif  /* defined (_MSC_VER) */
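
The rwlock wrapper above provides the same read/write-lock interface on Windows (SRW locks) and on POSIX (pthread rwlocks). A minimal usage sketch, assuming the declarations from rwlock.h; cache_example() and the shared structure it protects are illustrative only:

    #include <rwlock.h>

    /* hypothetical shared resource guarded by the lock */
    static rwlock_t *cache_lock;

    void cache_example(void)
    {
        cache_lock = rwlockInit();

        rwlockReadLock(cache_lock);    /* shared: many concurrent readers */
        /* ... look something up in the shared structure ... */
        rwlockReadUnlock(cache_lock);

        rwlockWriteLock(cache_lock);   /* exclusive: a single writer */
        /* ... modify the shared structure ... */
        rwlockWriteUnlock(cache_lock);

        rwlockDestroy(cache_lock);
    }
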
diff --git a/src/library/tools/bingen/CMakeLists.txt b/src/library/tools/bingen/CMakeLists.txt
new file mode 100644
index 0000000..487fed4
--- /dev/null
+++ b/src/library/tools/bingen/CMakeLists.txt
@@ -0,0 +1,33 @@
+# ########################################################################
+# Copyright 2013 Advanced Micro Devices, Inc.
+# 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# 
+# http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ########################################################################
+
+cmake_minimum_required(VERSION 2.6)
+project(bingen C CXX)
+ADD_DEFINITIONS(/D_CRT_SECURE_NO_WARNINGS)
+ADD_EXECUTABLE(bingen bingen.cpp)
+target_link_libraries(bingen ${OPENCL_LIBRARIES})
+include_directories(${OPENCL_INCLUDE_DIRS})
+
+set_target_properties( bingen PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/staging" )
+  
+if ( MSVC )
+  set_target_properties( bingen PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG "${CMAKE_CURRENT_BINARY_DIR}/staging" )
+  set_target_properties( bingen PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELEASE "${CMAKE_CURRENT_BINARY_DIR}/staging" )
+endif( )
+
+
+
+
diff --git a/src/library/tools/bingen/bingen.cpp b/src/library/tools/bingen/bingen.cpp
new file mode 100644
index 0000000..016cc23
--- /dev/null
+++ b/src/library/tools/bingen/bingen.cpp
@@ -0,0 +1,512 @@
+/* ************************************************************************
+* Copyright 2013 Advanced Micro Devices, Inc.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+* ************************************************************************/
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <string>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#ifdef __GNUC__
+// Linux
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#else
+// Windows
+#include <time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#define stat _stat
+#endif
+
+#include "CL/opencl.h"
+
+using namespace std;
+
+//const char* NameDevicesToSupport [] = {"Tahiti", "Hawaii"};
+char* NameDevicesToSupport = NULL;
+
+int writeBinaryToFile(const char* fileName, const char* binary, size_t numBytes)
+{
+  ofstream output;
+  output.open(fileName, ios::binary | ios::trunc);
+  if (output.is_open())
+  {
+    output.write(binary, numBytes);
+    output.close();
+    return 0;
+  }
+  else
+  {
+    return 1;
+  }
+}
+
+
+
+cl_int GenBinary(cl_context context, const char * source, const char * outFile)
+{
+  cl_int status = CL_SUCCESS;
+
+  size_t sourceSize[] = {strlen(source)};
+  cl_program program = clCreateProgramWithSource(context,1, &source,sourceSize,&status);
+  if (status!=CL_SUCCESS)
+  {
+    cout<<" error clCreateProgramWithSource, can't generate binaries"<<endl;
+    return status;
+  }
+
+
+  size_t nbDevicesInContext;
+  status = clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &nbDevicesInContext);
+  if (status!=CL_SUCCESS)
+  {
+    cout<<" error clGetContextInfo, can't generate binaries"<<endl;
+    return status;
+  }
+
+
+  // get the device id of the only device we support; see NameDevicesToSupport
+  cl_device_id* devicesContext = new cl_device_id[nbDevicesInContext];
+  clGetContextInfo(context, CL_CONTEXT_DEVICES, nbDevicesInContext, devicesContext, NULL);
+  if (status!=CL_SUCCESS)
+  {
+    cout<<" error clGetContextInfo, can't generate binaries"<<endl;
+    return status;
+  }
+
+  //size_t numDevices = sizeof(NameDevicesToSupport)/sizeof(char *); 
+  cl_device_id DeviceToSupport = NULL;
+  int j =0;
+  for(size_t i = 0; i < nbDevicesInContext; i++)
+  {    
+    char deviceName[1024];
+    status = clGetDeviceInfo(devicesContext[i], CL_DEVICE_NAME,sizeof(deviceName),deviceName, NULL);
+    if (!strcmp(deviceName, NameDevicesToSupport)   )
+    {
+      DeviceToSupport = devicesContext[i];
+      j++;
+      if (j==1)
+        break;
+    }
+  }
+
+  // build for the only device we want to support
+  status = clBuildProgram(program,1,&DeviceToSupport,"",NULL,NULL);
+
+
+  /* This call is intentionally left without an error check,
+  as it may not pass if kernels rely on specific properties of devices.
+  In such cases, binaries for eligible devices are generated and dumped
+  even when this call returns an error */
+  //CHECK_OPENCL_ERROR(status, "clBuildProgram failed.");
+#ifdef _DEBUG
+  if (status != CL_SUCCESS)
+  {
+    printf("clBuildProgram Failed\n");
+    printf("status = %d\n", status);
+    size_t len=0;
+
+    clGetProgramBuildInfo(program, DeviceToSupport, CL_PROGRAM_BUILD_LOG, 0, NULL, &len);
+    char* buffer = new char[len];
+
+    printf("Error: Failed to build program executable!\n");
+    clGetProgramBuildInfo(program, DeviceToSupport, CL_PROGRAM_BUILD_LOG, len, buffer, 0);
+    printf("%s\n", buffer);
+    delete [] buffer;
+  }
+
+#endif
+
+
+
+  /* figure out the sizes of each of the binaries. */    
+  size_t *binarySizes = new size_t[1];
+  status = clGetProgramInfo(program,CL_PROGRAM_BINARY_SIZES,sizeof(size_t) * 1,binarySizes,NULL);
+
+  size_t i = 0;
+  /* copy over all of the generated binaries. */
+  char **binaries = new char*[1];
+
+  for(i = 0; i < 1; i++)
+  {
+    if(binarySizes[i] != 0)
+      binaries[i] = new char[binarySizes[i]];
+    else
+      binaries[i] = NULL;
+  }
+
+
+
+  status = clGetProgramInfo(program,CL_PROGRAM_BINARIES,sizeof(char *) * 1,binaries,NULL);
+
+  /* dump out each binary into its own separate file. */
+  for(i = 0; i < 1; i++)
+  {
+    char fileName[200];
+    char deviceName[1024];
+    status = clGetDeviceInfo(DeviceToSupport, CL_DEVICE_NAME,sizeof(deviceName),deviceName, NULL);
+
+    sprintf(fileName, "%s.%s", outFile, deviceName);
+
+    if(binarySizes[i] != 0)
+    {
+      cout<<deviceName<<" binary kernel: "<<fileName<<endl;
+      if(writeBinaryToFile(fileName,binaries[i],binarySizes[i]))
+      {
+        std::cout << "Failed to load kernel file : " << fileName << std::endl;
+        return 1;
+      }
+    }
+    else
+    {
+      cout<< deviceName<< " binary kernel(" << fileName << ") : Skipping as there is no binary data to write!" <<endl;
+    }
+
+  }
+  // Release all resources and memory
+  for(i = 0; i < 1; i++)
+  {
+    if(binaries[i] != NULL)
+    {
+      delete [] binaries[i];
+      binaries[i] = NULL;
+    }
+  }
+  if(binaries != NULL)
+  {
+    delete [] binaries;
+    binaries = NULL;
+  }
+  if(binarySizes != NULL)
+  {
+    delete [] binarySizes;
+    binarySizes = NULL;
+  }
+
+  if(devicesContext)
+  {
+    delete [] devicesContext;
+    devicesContext = NULL;
+  }
+  status = clReleaseProgram(program);
+
+
+
+  return 0;
+}
+
+cl_int FinCLPlatform(cl_platform_id& platform)
+{
+  cl_int status = CL_SUCCESS;
+  cl_uint numPlatforms;
+  //cl_platform_id platform = NULL;
+  status = clGetPlatformIDs(0, NULL, &numPlatforms);
+  if(status != CL_SUCCESS)
+  {
+    cout<<"Error: clGetPlatformIDs failed. Error code : ";
+    return status;
+  }
+
+  if (0 < numPlatforms) 
+  {
+    // Get selected platform
+    cl_platform_id* platforms = new cl_platform_id[numPlatforms];
+    status = clGetPlatformIDs(numPlatforms, platforms, NULL);
+    if(status != CL_SUCCESS)
+    {
+      cout<<"Error: clGetPlatformIDs failed. Error code : "<<status;
+      return status;
+    }
+
+    // Print all platforms
+    for (unsigned i = 0; i < numPlatforms; ++i) 
+    {
+      char pbuf[100];
+      status = clGetPlatformInfo(platforms[i],
+        CL_PLATFORM_VENDOR,
+        sizeof(pbuf),
+        pbuf,
+        NULL);
+
+      if(status != CL_SUCCESS)
+      {
+        cout<<"Error: clGetPlatformInfo failed. Error code : "<<status;
+        return status;
+      }
+
+      cout << "Platform " << i << " : " << pbuf << endl;
+    }
+
+    // Get AMD platform
+    for (unsigned i = 0; i < numPlatforms; ++i) 
+    {
+      char pbuf[100];
+      status = clGetPlatformInfo(platforms[i],
+        CL_PLATFORM_VENDOR,
+        sizeof(pbuf),
+        pbuf,
+        NULL);
+
+      if(status != CL_SUCCESS)
+      {
+        cout<<"Error: clGetPlatformInfo failed. Error code : "<<status;
+        return status;
+      }
+
+      platform = platforms[i];
+      if (!strcmp(pbuf, "Advanced Micro Devices, Inc.")) 
+      {
+        break;
+      }
+    }
+
+    // Check for AMD platform
+    char pbuf[100];
+    status = clGetPlatformInfo(platform,
+      CL_PLATFORM_VENDOR,
+      sizeof(pbuf),
+      pbuf,
+      NULL);
+
+    if(status != CL_SUCCESS)
+    {
+      cout<<"Error: clGetPlatformInfo failed. Error code : "<<status;
+      return status;
+    }
+    if (strcmp(pbuf, "Advanced Micro Devices, Inc.")) 
+    {
+      cout << "AMD platform not found" << endl;
+      return -1; 
+    }
+
+  }
+
+  return status;
+
+}
+
+// we know the input string will be of the form: static const char* kernelname = \"
+string FindKernelNameFromString(string inputLine)
+{
+  string result;
+  size_t pos = inputLine.find( '*' );
+
+  size_t StartKernelName = 1+pos;
+  char space = inputLine[StartKernelName];
+  while(space==' ')
+  {
+    StartKernelName++;
+    space = inputLine[StartKernelName];
+  }
+
+  size_t endKernelName = StartKernelName+1; 
+  space = inputLine[endKernelName];
+  while(space!=' ')
+  {
+    endKernelName++;
+    space = inputLine[endKernelName];
+  }
+  result = inputLine.substr(StartKernelName,endKernelName-StartKernelName );
+  return result;
+}
+
+
+int main( int argc, char *argv[] )
+{
+  cl_int status = CL_SUCCESS;
+  cl_platform_id platform = NULL;
+  cout<< "entering bingen function"<<endl;
+
+
+
+
+  if (argc < 3)
+  {
+    cout<< "we found only "<<argc <<" arguments"<<endl;
+    cout<< "needs at least 3 argument"<<endl;
+    cout<< "app [deviceName] [file name]..."<<endl;
+    return -1;
+  }
+
+
+  NameDevicesToSupport = argv[1];
+
+
+
+ // cl_uint error = clGetDeviceInfo(device, CL_DEVICE_ADDRESS_BITS, sizeof(cl_uint), &bitness, NULL);
+  int _64BitsUse = 0;
+#if defined( _WIN64 ) || defined(__LP64__) || defined(_LP64)
+  _64BitsUse = 1;
+#endif
+
+  status = FinCLPlatform(platform);
+
+  if(status!=CL_SUCCESS || platform==NULL)
+  {
+    cout<< "can't find a AMD platform for OpenCL" << endl;
+    return status;
+  }
+
+  cl_context_properties cps[5] =
+  {
+    CL_CONTEXT_PLATFORM,
+    (cl_context_properties)platform,
+    CL_CONTEXT_OFFLINE_DEVICES_AMD,
+    (cl_context_properties)1,
+    0
+  };
+  cl_context context = clCreateContextFromType(
+    cps,
+    CL_DEVICE_TYPE_ALL,
+    NULL,
+    NULL,
+    &status);
+
+  if(status!=CL_SUCCESS)
+  {
+    cout<<"can't create offline context"<<endl;
+    return status;
+  }
+
+  //const char * _64Bits = getenv("GPU_FORCE_64BIT_PTR");
+  const char *outputPrefix = "";
+  for ( int i=2; i<argc; i++ )
+  {
+    char cltFile[1024];
+
+    strcpy(cltFile, outputPrefix);
+#ifdef __GNUC__
+    //   strcat(cltFile, "/");
+#else
+    //strcat(cltFile, "\\" );
+#endif
+    strcat(cltFile, argv[i]);
+    strcat(cltFile,NameDevicesToSupport );
+    if (_64BitsUse)
+      strcat(cltFile, "_64");
+    else
+      strcat(cltFile, "_32");
+    strcat(cltFile, ".bin.cl");
+
+    //if( !isModified(argv[i], cltFile ) )
+    //{
+    //  continue;
+    //}
+    string tempfile(cltFile);
+    string subtempFile = tempfile.substr(tempfile.find_last_of("\\/") + 1, tempfile.length());
+    std::cout << "sub string " << subtempFile << std::endl;
+
+    std::cout << "Processing " << argv[i] << std::endl;
+    std::cout << "outfile name " << subtempFile.c_str() << std::endl;
+
+    ifstream inFile( argv[i] );
+    ofstream outFile(subtempFile.c_str());
+
+    stringstream ss;
+
+    if( !(inFile.is_open()) ) 
+    {
+      cerr << "\tWARNING: couldn't open input file!" << std::endl;
+      continue;
+    }
+
+    if (!(outFile.is_open() ))
+    {
+      cerr << "\tWARNING: couldn't open output file!" << std::endl;
+      continue;
+    }
+
+    bool validKernel = false;
+    string kernelName;
+    string str;
+    size_t found;
+    int lineCount;
+    vector<string> kernelnames;
+    while( inFile.good() )
+    {
+      getline( inFile, str );
+
+      // Replace all tabs with spaces
+      found = str.find( '\t' );
+      while (found != string::npos)
+      {
+        str.replace( found, 1, "    " );
+        found = str.find( '\t' );
+      }
+
+      // Look for the beginning of the kernel, which gives the kernelName
+      if ( !validKernel && (str.find( "char" ) != string::npos) && (str.find( '*' ) != string::npos)
+        && (str.find( '"' ) != string::npos))       // Beginning of the kernel
+      {
+        kernelName = FindKernelNameFromString(str);
+        if(_64BitsUse)
+          kernelName+="_64";
+        else
+          kernelName+="_32";
+
+        kernelName+="_bin";
+        kernelnames.push_back(kernelName);
+        validKernel = true;
+        // ss << str << "\\\n";
+        lineCount = 1;
+      }
+
+      // Look for the end of the kernel
+      else if( (str.find( "\";" ) != string::npos) && validKernel )
+      {
+        //ss << str << "\n\n\n";
+        cout<<"string kernel name: "<<kernelName<<endl;
+        GenBinary(context, ss.str().c_str(), kernelName.c_str());
+        validKernel = false;
+
+        //reinitialize ss
+        //ss=stringstream(); this doesn't work on linux with gcc4.4.7
+        ss.str("");
+        str.clear();
+
+      }
+      else if( validKernel )
+      {
+        ss << str <<"\n";//<< "\\n\\\n";                   // All other lines
+        lineCount ++;
+        // Break the string every 50 lines so that it does not overflow string limitations on windows
+        //if( (lineCount%50) == 0 )
+        //  ss << "\"\n\"\\\n";
+      } 
+
+    }
+    inFile.close();
+
+    // int numDevices = sizeof(NameDevicesToSupport)/sizeof(char *); 
+
+    cout<< "CL binaries generation terminated, now generating the .bin.cl file for tplgen process : "<< cltFile<< endl;
+
+    for (int i=0; i<kernelnames.size(); i++)
+    {
+      // for(int j = 0; j<numDevices; j++)
+      outFile << "const char " <<kernelnames[i]<<"_"<<NameDevicesToSupport<<" @"<<kernelnames[i]<<"."<<NameDevicesToSupport<<"\n";
+      cout << "const char " <<kernelnames[i]<<"_"<<NameDevicesToSupport<<" @"<<kernelnames[i]<<"."<<NameDevicesToSupport<<"\n";
+    }
+    outFile.close();
+  }
+
+  clReleaseContext(context);
+  return 0;
+}
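
For reference, a sketch of how a binary dumped by GenBinary() above could be loaded back at run time with clCreateProgramWithBinary(); the load_binary_program() helper, the sample file name and the error handling are illustrative only, not code from this library:

    #include <stdio.h>
    #include <stdlib.h>
    #include <CL/cl.h>

    /* Hypothetical helper: read a binary produced by bingen
     * (e.g. "dgemm_NT_64_bin.Hawaii") and turn it back into a cl_program. */
    cl_program load_binary_program(cl_context ctx, cl_device_id dev, const char *path)
    {
        FILE *f = fopen(path, "rb");
        if (f == NULL)
            return NULL;
        fseek(f, 0, SEEK_END);
        size_t size = (size_t)ftell(f);
        fseek(f, 0, SEEK_SET);
        unsigned char *bin = (unsigned char *)malloc(size);
        if (fread(bin, 1, size, f) != size) {
            fclose(f);
            free(bin);
            return NULL;
        }
        fclose(f);

        cl_int status, binary_status;
        cl_program prog = clCreateProgramWithBinary(ctx, 1, &dev, &size,
                                                    (const unsigned char **)&bin,
                                                    &binary_status, &status);
        free(bin);
        if (status != CL_SUCCESS)
            return NULL;

        /* the program still has to be built for the device before use */
        status = clBuildProgram(prog, 1, &dev, "", NULL, NULL);
        return (status == CL_SUCCESS) ? prog : NULL;
    }
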
diff --git a/src/library/tools/ktest/CMakeLists.txt b/src/library/tools/ktest/CMakeLists.txt
index 2cc8c31..6094197 100644
--- a/src/library/tools/ktest/CMakeLists.txt
+++ b/src/library/tools/ktest/CMakeLists.txt
@@ -45,10 +45,12 @@ set(KTEST_EXTERNAL_SRC
     ../../common/mutex.c
     ../../common/trace_malloc.c
     ../../common/gens/dblock_kgen.c
+    ../../common/md5sum.c
     ../../blas/impl.c
     ../../blas/scimage.c
     ../../blas/generic/blas_funcs.c
     ../../blas/generic/common.c
+    ../../blas/generic/common2.cc
     ../../blas/generic/events.c
     ../../blas/generic/kernel_extra.c
     ../../blas/generic/matrix_dims.c
@@ -57,6 +59,8 @@ set(KTEST_EXTERNAL_SRC
     ../../blas/generic/solution_seq_make.c
     ../../blas/generic/solution_seq.c
     ../../blas/generic/kdump.c
+    ../../blas/generic/binary_lookup.cc
+    ../../blas/generic/functor_cache.cc
     ../../blas/gens/tile.c
     ../../blas/gens/tile_iter.c
     ../../blas/gens/blas_subgroup.c
@@ -95,21 +99,21 @@ set(KTEST_EXTERNAL_SRC
     ../tune/storage_io.c
     ../tune/dimension.c
     
-	../../blas/gens/trmv_reg.cpp
-	../../blas/gens/ger_lds.cpp
-	../../blas/gens/trsv_trtri.cpp
-	../../blas/gens/trsv_gemv.cpp
-	../../blas/gens/kprintf.cpp
-	../../blas/gens/syr_lds.cpp
-	../../blas/gens/symm_cached.cpp
-	../../blas/gens/gemm_cached.cpp
-	../../blas/gens/gemm_tail_cached.cpp
-	../../blas/gens/syr2_lds.cpp
-	../../blas/gens/her_lds.cpp
-	../../blas/gens/her2_lds.cpp
-	../../blas/gens/gbmv.cpp
-	../../blas/gens/tuned_numbers.c
-	../../blas/gens/swap_reg.cpp
+    ../../blas/gens/trmv_reg.cpp
+    ../../blas/gens/ger_lds.cpp
+    ../../blas/gens/trsv_trtri.cpp
+    ../../blas/gens/trsv_gemv.cpp
+    ../../blas/gens/kprintf.cpp
+    ../../blas/gens/syr_lds.cpp
+    ../../blas/gens/symm_cached.cpp
+    ../../blas/gens/gemm_cached.cpp
+    ../../blas/gens/gemm_tail_cached.cpp
+    ../../blas/gens/syr2_lds.cpp
+    ../../blas/gens/her_lds.cpp
+    ../../blas/gens/her2_lds.cpp
+    ../../blas/gens/gbmv.cpp
+    ../../blas/gens/tuned_numbers.c
+    ../../blas/gens/swap_reg.cpp
     ../../blas/gens/scal_reg.cpp
     ../../blas/gens/copy_reg.cpp
     ../../blas/gens/axpy_reg.cpp
diff --git a/src/library/tools/tplgen/tplgen.cpp b/src/library/tools/tplgen/tplgen.cpp
index e81ecd2..d6c2deb 100644
--- a/src/library/tools/tplgen/tplgen.cpp
+++ b/src/library/tools/tplgen/tplgen.cpp
@@ -35,6 +35,51 @@
 
 using namespace std;
 
+void binaryCaseProcess(const string &inputStr, std::ostream &outFile)
+{
+  //Get the binary location in fileName
+  size_t found = inputStr.find( '@' );
+  string fileName = inputStr.substr (found+1);
+
+  //Open the binary
+  std::ifstream file (fileName.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
+  size_t fileSize;
+  if(!file.is_open())
+  {
+    std::cerr << "fail to open binary file '" <<  fileName << "'" << std::endl;
+    exit(1);
+  }
+
+  //Get contents of the binary
+  char* fileContents;
+  fileSize = file.tellg();
+  fileContents = new char[fileSize];
+  file.seekg(0, std::ios::beg);
+  if(!file.read(fileContents, fileSize))
+  {
+    std::cerr << "fail to read binary file '" <<  fileName << "'" << std::endl;
+    exit(1);
+  }
+  file.close();
+
+
+  outFile << "//generated from the binary: " << fileName << "\n";
+
+  //Copy the chars found before the @
+  outFile <<  inputStr.substr (0,found);
+
+  //Write contents of the binary
+  outFile << "[" << fileSize << "] = {\n";
+  for(size_t i=0; i < fileSize; i++)
+  {
+    outFile << (int) fileContents[i];
+    if(i < fileSize-1) outFile << ",";
+    if((i+1)%50 == 0) outFile << "\n";
+  }
+  outFile << "\n};\n";
+}
+
+
 bool isModified( char *clFile, char *clTFile )
 {
     struct stat queryClFile;
@@ -74,6 +119,9 @@ int main( int argc, char *argv[] )
     string str;
     int startOptions = 1;
     const char *outputPrefix = "";
+    const char *inputPrefix = "";
+    char tempInputPrefix[1024];
+    const char *inputfile = "";
 
     std::cout << "TPLGEN Running.....\n";
     if (argc < 2)
@@ -91,6 +139,12 @@ int main( int argc, char *argv[] )
         startOptions = 3;
     }
 
+    if (strcmp(argv[startOptions], "-i") == 0)
+    {
+        inputPrefix = argv[startOptions + 1];
+        startOptions += 2;
+    }
+
     for ( int i=startOptions; i<argc; i++ )
     {
         char cltFile[1024];
@@ -104,20 +158,28 @@ int main( int argc, char *argv[] )
         strcat(cltFile, argv[i]);
         strcat(cltFile, "T");
 
-        if( !isModified(argv[i], cltFile ) )
-        {
-            continue;
-        }
-        std::cout << "Processing " << argv[i] << std::endl;
-
-        ifstream inFile( argv[i] );
+        //if( !isModified(argv[i], cltFile ) )
+        //{
+        //    continue;
+        //}
+        strcpy(tempInputPrefix, inputPrefix);
+        inputfile = strcat(tempInputPrefix, argv[i]);
+        std::cout << "Processing " << inputfile << std::endl;
+        std::cout << "output file " << cltFile << std::endl;
+
+        ifstream inFile(inputfile);
         ofstream outFile( cltFile );
 
-        if( !(inFile.is_open()) || !(outFile.is_open()) )
+        if( !(inFile.is_open()) )
         {
-            cerr << "\tWARNING: couldn't open file!" << std::endl;
+            cerr << "\tWARNING: couldn't open input file " <<  inputfile << std::endl;
             continue;
         }
+        if ( !(outFile.is_open()) )
+        {
+            cerr << "\tWARNING: couldn't open output file " << cltFile << std::endl;
+            continue;
+        }
 
         validKernel = false;
         while( inFile.good() )
@@ -140,6 +202,11 @@ int main( int argc, char *argv[] )
                 outFile << str << "\\\n";
                 lineCount = 1;
             }
+            // Deals with the case of a binary
+            else if( !validKernel && (str.find( "char" ) != string::npos) && (str.find( '@' ) != string::npos))
+            {
+              binaryCaseProcess(str, outFile);
+            }
             // Find for end of kernel
             else if( (str.find( "\";" ) != string::npos) && validKernel )
             {
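
To make the bingen/tplgen hand-off concrete: bingen writes one reference line per kernel of the form "const char <name>_<device> @<name>.<device>", and binaryCaseProcess() above replaces everything after the '@' with the bytes of that file. The kernel name, array size and byte values below are made up for illustration:

    /* generated output, assuming a reference line
     *   const char dgemm_NT_64_bin_Hawaii @dgemm_NT_64_bin.Hawaii
     * and a 4-byte binary file: */
    //generated from the binary: dgemm_NT_64_bin.Hawaii
    const char dgemm_NT_64_bin_Hawaii [4] = {
    127,69,76,70
    };
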
diff --git a/src/library/tools/tune/CMakeLists.txt b/src/library/tools/tune/CMakeLists.txt
index 7f61513..3d2aa21 100644
--- a/src/library/tools/tune/CMakeLists.txt
+++ b/src/library/tools/tune/CMakeLists.txt
@@ -27,6 +27,7 @@ set(TOOLS_SRC
 
 set(TOOLS_EXTERNAL_SRC
     ../../blas/generic/common.c
+    ../../blas/generic/common2.cc
     ../../blas/generic/blas_funcs.c
     ../../blas/init.c
     ../../blas/impl.c
@@ -76,6 +77,7 @@ set(TOOLS_EXTERNAL_SRC
     ../../common/kgen_guard.c
     ../../common/clkern.c
     ../../common/trace_malloc.c
+    ../../common/md5sum.c
     ../../common/gens/dblock_kgen.c
     ../../blas/generic/solution_seq_make.c
     ../../blas/generic/solution_seq.c
@@ -83,21 +85,22 @@ set(TOOLS_EXTERNAL_SRC
     ../../blas/generic/problem_iter.c
     ../../blas/generic/kernel_extra.c
     ../../blas/generic/kdump.c
-	
-	../../blas/gens/trmv_reg.cpp
-	../../blas/gens/ger_lds.cpp
-	../../blas/gens/trsv_trtri.cpp
-	../../blas/gens/trsv_gemv.cpp
-	../../blas/gens/kprintf.cpp
-	../../blas/gens/syr_lds.cpp
-	../../blas/gens/symm_cached.cpp
-	../../blas/gens/gemm_cached.cpp
-	../../blas/gens/gemm_tail_cached.cpp
-	../../blas/gens/syr2_lds.cpp
-	../../blas/gens/her_lds.cpp
-	../../blas/gens/her2_lds.cpp
-	../../blas/gens/gbmv.cpp
-	../../blas/gens/tuned_numbers.c
+    ../../blas/generic/binary_lookup.cc
+    ../../blas/generic/functor_cache.cc
+    ../../blas/gens/trmv_reg.cpp
+    ../../blas/gens/ger_lds.cpp
+    ../../blas/gens/trsv_trtri.cpp
+    ../../blas/gens/trsv_gemv.cpp
+    ../../blas/gens/kprintf.cpp
+    ../../blas/gens/syr_lds.cpp
+    ../../blas/gens/symm_cached.cpp
+    ../../blas/gens/gemm_cached.cpp
+    ../../blas/gens/gemm_tail_cached.cpp
+    ../../blas/gens/syr2_lds.cpp
+    ../../blas/gens/her_lds.cpp
+    ../../blas/gens/her2_lds.cpp
+    ../../blas/gens/gbmv.cpp
+    ../../blas/gens/tuned_numbers.c
     ../../blas/gens/swap_reg.cpp
     ../../blas/gens/scal_reg.cpp
     ../../blas/gens/copy_reg.cpp
diff --git a/src/library/tools/tune/tune.c b/src/library/tools/tune/tune.c
index b6174c4..c21527f 100644
--- a/src/library/tools/tune/tune.c
+++ b/src/library/tools/tune/tune.c
@@ -578,7 +578,7 @@ genKernel(GParam *param, CLBLASKernExtra* extra, MemoryPattern *pattern)
     device = genInfo.targetDevice.id;
 
     setupBuildOpts(bopts, device, pattern);
-    param->kernel = makeKernel(device, genInfo.ctx, genKernel,
+    param->kernel = makeKernel(device, genInfo.ctx, genKernel, NULL /*cl_program*/,
                                param->dims, &param->pgran, extra, bopts, NULL);
     if (param->kernel != NULL) {
         status = clGetProgramInfo(param->kernel->program, CL_PROGRAM_BINARY_SIZES,
@@ -1285,8 +1285,9 @@ generatePrepKernel(
 
     k = makeKernel(
         device,
-        genInfo.ctx,
+        genInfo.ctx,        
         pattern->sops->genKernel,
+        0,
         param->dims,
         &pgran,
         extra,
diff --git a/src/samples/example_csscal.c b/src/samples/example_csscal.c
index 98dfcee..7415195 100644
--- a/src/samples/example_csscal.c
+++ b/src/samples/example_csscal.c
@@ -102,12 +102,13 @@ main(void)
         return 1;
     }
 
+
     /* Prepare OpenCL memory objects and place vectors inside them. */
     bufX = clCreateBuffer(ctx, CL_MEM_READ_WRITE, ( lenX * sizeof(cl_float2)),
                           NULL, &err);
 
     err = clEnqueueWriteBuffer(queue, bufX, CL_TRUE, 0,
-                    (lenX * sizeof(cl_float)), X, 0, NULL, NULL);
+                    (lenX * sizeof(cl_float2)), X, 0, NULL, NULL);
 
     /* Call clblas function. */
     err = clblasCsscal( N, alpha, bufX, 0, incx, 1, &queue, 0, NULL, &event);
diff --git a/src/tests/correctness/test-correctness.cpp b/src/tests/correctness/test-correctness.cpp
index 75da51a..67ac971 100644
--- a/src/tests/correctness/test-correctness.cpp
+++ b/src/tests/correctness/test-correctness.cpp
@@ -155,7 +155,8 @@ const int numQueues[] =
     { 3, 4 };
 #else                       /* MEDIUM_TESTS */
 const int smallRange[] =
-    { 15, 16, 33, 40, 62, 64, 128, 129, 256, 258 };
+    //{ 15, 16, 33, 40, 62, 64, 128, 129, 256, 258 };
+      { 8, 16, 17, 32, 62, 64, 128, 144, 256  };
     //{ 15, 16, 32, 33, 63, 64, 128, 129, 256, 257 };
 	//{ 3, 4, 15, 16, 32, 33, 63, 64, 128, 129, 256, 257, 333, 566, 787, 1024, 1025, 1113, 1111, 999, 883, 633, 17 };
 
diff --git a/src/tests/performance/test-performance.cpp b/src/tests/performance/test-performance.cpp
index df74132..26fb4ec 100644
--- a/src/tests/performance/test-performance.cpp
+++ b/src/tests/performance/test-performance.cpp
@@ -593,7 +593,8 @@ static const clblasUplo uploSet[] =
 static const clblasDiag diagSet[] =
     { clblasUnit, clblasNonUnit };
 
-const int sizeRange[] = { 2048, 2800, 4096, 5600 };
+const int sizeRange[]   = { 2048, 2800, 4096, 5600 };
+const int sizeRange48[] = { 41*48, 41*48+24 };
 // Since blas-1 contains only vector arrays, huge vectors has to be provided to reach the peak of the card
 const int blas1sizeRange[] = {4194304, 7840000, 16777216, 31360000 };
 //const int sizeRange[] = { 2800, 4096, 5600};
@@ -655,7 +656,7 @@ INSTANTIATE_TEST_CASE_P(Generic, GEMM, Combine(
 // Custom test - use command line arguments to tweak it
 INSTANTIATE_TEST_CASE_P(Custom, GEMM, Combine(
     ValuesIn(orderSet), ValuesIn(transSet), ValuesIn(transSet),
-    Values(32), Values(32), Values(32),
+    Values(41*48), Values(41*48), Values(41*48),
     Values(ExtraTestSizes()), Values(1)));
 #endif
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/clblas.git



More information about the debian-science-commits mailing list