You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mahout.apache.org by ap...@apache.org on 2016/06/08 21:40:06 UTC
[09/51] [partial] mahout git commit: (nojira) add native-viennaCL
module to codebase. closes apache/mahout#241
http://git-wip-us.apache.org/repos/asf/mahout/blob/f7c1f802/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/coordinate_matrix.hpp
----------------------------------------------------------------------
diff --git a/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/coordinate_matrix.hpp b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/coordinate_matrix.hpp
new file mode 100644
index 0000000..2f67a5b
--- /dev/null
+++ b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/coordinate_matrix.hpp
@@ -0,0 +1,405 @@
+#ifndef VIENNACL_LINALG_OPENCL_KERNELS_COORDINATE_MATRIX_HPP
+#define VIENNACL_LINALG_OPENCL_KERNELS_COORDINATE_MATRIX_HPP
+
+/* =========================================================================
+ Copyright (c) 2010-2016, Institute for Microelectronics,
+ Institute for Analysis and Scientific Computing,
+ TU Wien.
+ Portions of this software are copyright by UChicago Argonne, LLC.
+
+ -----------------
+ ViennaCL - The Vienna Computing Library
+ -----------------
+
+ Project Head: Karl Rupp rupp@iue.tuwien.ac.at
+
+ (A list of authors and contributors can be found in the manual)
+
+ License: MIT (X11), see file LICENSE in the base directory
+============================================================================= */
+
+#include "viennacl/tools/tools.hpp"
+#include "viennacl/ocl/kernel.hpp"
+#include "viennacl/ocl/platform.hpp"
+#include "viennacl/ocl/utils.hpp"
+
+#include "viennacl/linalg/opencl/common.hpp"
+
+/** @file viennacl/linalg/opencl/kernels/coordinate_matrix.hpp
+ * @brief OpenCL kernel file for coordinate_matrix operations */
+namespace viennacl
+{
+namespace linalg
+{
+namespace opencl
+{
+namespace kernels
+{
+
+//////////////////////////// Part 1: Kernel generation routines ////////////////////////////////////
+
/** @brief Generates the OpenCL kernel "vec_mul" computing result = alpha * A * x + beta * result
  *        for a coordinate_matrix A (COO storage: one (row_index, column_index) uint2 per nonzero).
  *
  * Each work-group reduces the nonzeros in [group_boundaries[gid], group_boundaries[gid+1])
  * with a segmented parallel reduction in local memory; a row's accumulated value is written
  * to 'result' once the last entry of that row has been reduced.
  *
  * @param source          String object the generated kernel source is appended to
  * @param numeric_string  Matrix scalar type as a string (e.g. "float" or "double")
  */
template<typename StringT>
void generate_coordinate_matrix_vec_mul(StringT & source, std::string const & numeric_string)
{
  source.append("__kernel void vec_mul( \n");
  source.append("  __global const uint2 * coords, \n");//(row_index, column_index)
  source.append("  __global const "); source.append(numeric_string); source.append(" * elements, \n");
  source.append("  __global const uint * group_boundaries, \n");
  source.append("  __global const "); source.append(numeric_string); source.append(" * x, \n");
  source.append("  uint4 layout_x, \n");
  source.append("  "); source.append(numeric_string); source.append(" alpha, \n");
  source.append("  __global "); source.append(numeric_string); source.append(" * result, \n");
  source.append("  uint4 layout_result, \n");
  source.append("  "); source.append(numeric_string); source.append(" beta, \n");
  source.append("  __local unsigned int * shared_rows, \n");
  source.append("  __local "); source.append(numeric_string); source.append(" * inter_results) \n");
  source.append("{ \n");
  source.append("  uint2 tmp; \n");
  source.append("  "); source.append(numeric_string); source.append(" val; \n");
  source.append("  uint group_start = group_boundaries[get_group_id(0)]; \n");
  source.append("  uint group_end = group_boundaries[get_group_id(0) + 1]; \n");
  source.append("  uint k_end = (group_end > group_start) ? 1 + (group_end - group_start - 1) / get_local_size(0) : 0; \n"); // -1 in order to have correct behavior if group_end - group_start == j * get_local_size(0)

  source.append("  uint local_index = 0; \n");

  source.append("  for (uint k = 0; k < k_end; ++k) { \n");
  source.append("    local_index = group_start + k * get_local_size(0) + get_local_id(0); \n");

  // out-of-range work items load a zero entry so the reduction below stays uniform
  source.append("    tmp = (local_index < group_end) ? coords[local_index] : (uint2) 0; \n");
  source.append("    val = (local_index < group_end) ? elements[local_index] * x[tmp.y * layout_x.y + layout_x.x] : 0; \n");

  //check for carry from previous loop run:
  source.append("    if (get_local_id(0) == 0 && k > 0) { \n");
  source.append("      if (tmp.x == shared_rows[get_local_size(0)-1]) \n");
  source.append("        val += inter_results[get_local_size(0)-1]; \n");
  source.append("      else if (beta != 0) \n");
  source.append("        result[shared_rows[get_local_size(0)-1] * layout_result.y + layout_result.x] += alpha * inter_results[get_local_size(0)-1]; \n");
  source.append("      else \n");
  source.append("        result[shared_rows[get_local_size(0)-1] * layout_result.y + layout_result.x] = alpha * inter_results[get_local_size(0)-1]; \n");
  source.append("    } \n");

  //segmented parallel reduction begin
  source.append("    barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("    shared_rows[get_local_id(0)] = tmp.x; \n");
  source.append("    inter_results[get_local_id(0)] = val; \n");
  source.append("    "); source.append(numeric_string); source.append(" left = 0; \n");
  source.append("    barrier(CLK_LOCAL_MEM_FENCE); \n");

  // log2(local_size) passes; partial sums only merge within runs of equal row index
  source.append("    for (unsigned int stride = 1; stride < get_local_size(0); stride *= 2) { \n");
  source.append("      left = (get_local_id(0) >= stride && tmp.x == shared_rows[get_local_id(0) - stride]) ? inter_results[get_local_id(0) - stride] : 0; \n");
  source.append("      barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("      inter_results[get_local_id(0)] += left; \n");
  source.append("      barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("    } \n");
  //segmented parallel reduction end

  // interior row boundary: the work item holding a row's last entry writes its result
  source.append("    if (local_index < group_end - 1 && get_local_id(0) < get_local_size(0) - 1 && \n");
  source.append("        shared_rows[get_local_id(0)] != shared_rows[get_local_id(0) + 1]) { \n");
  source.append("      if (beta != 0) result[tmp.x * layout_result.y + layout_result.x] += alpha * inter_results[get_local_id(0)]; \n");
  source.append("      else result[tmp.x * layout_result.y + layout_result.x] = alpha * inter_results[get_local_id(0)]; \n");
  source.append("    } \n");

  source.append("    barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("  } \n"); //for k

  source.append("  if (local_index + 1 == group_end) {\n"); //write results of last active entry (this may not necessarily be the case already)
  source.append("    if (beta != 0) result[tmp.x * layout_result.y + layout_result.x] += alpha * inter_results[get_local_id(0)]; \n");
  source.append("    else result[tmp.x * layout_result.y + layout_result.x] = alpha * inter_results[get_local_id(0)]; \n");
  source.append("  } \n");
  source.append("} \n");

}
+
namespace detail
{
  /** @brief Generates one layout variant of the kernel for C = A * B with A being a coordinate_matrix (COO), B and C dense
    *
    * @param source          String object the generated kernel source is appended to
    * @param numeric_string  Matrix scalar type as a string (e.g. "float" or "double")
    * @param B_transposed    True if the kernel multiplies with the transpose of B
    * @param B_row_major     True if the dense factor B is stored row-major
    * @param C_row_major     True if the dense result C is stored row-major
    */
  template<typename StringT>
  void generate_coordinate_matrix_dense_matrix_mul(StringT & source, std::string const & numeric_string,
                                                   bool B_transposed, bool B_row_major, bool C_row_major)
  {
    // kernel name encodes the layout combination so all eight variants can coexist in one program
    source.append("__kernel void ");
    source.append(viennacl::linalg::opencl::detail::sparse_dense_matmult_kernel_name(B_transposed, B_row_major, C_row_major));
    source.append("( \n");
    source.append("  __global const uint2 * coords, \n");//(row_index, column_index)
    source.append("  __global const "); source.append(numeric_string); source.append(" * elements, \n");
    source.append("  __global const uint * group_boundaries, \n");
    source.append("  __global const "); source.append(numeric_string); source.append(" * d_mat, \n");
    source.append("  unsigned int d_mat_row_start, \n");
    source.append("  unsigned int d_mat_col_start, \n");
    source.append("  unsigned int d_mat_row_inc, \n");
    source.append("  unsigned int d_mat_col_inc, \n");
    source.append("  unsigned int d_mat_row_size, \n");
    source.append("  unsigned int d_mat_col_size, \n");
    source.append("  unsigned int d_mat_internal_rows, \n");
    source.append("  unsigned int d_mat_internal_cols, \n");
    source.append("  __global "); source.append(numeric_string); source.append(" * result, \n");
    source.append("  unsigned int result_row_start, \n");
    source.append("  unsigned int result_col_start, \n");
    source.append("  unsigned int result_row_inc, \n");
    source.append("  unsigned int result_col_inc, \n");
    source.append("  unsigned int result_row_size, \n");
    source.append("  unsigned int result_col_size, \n");
    source.append("  unsigned int result_internal_rows, \n");
    source.append("  unsigned int result_internal_cols, \n");
    source.append("  __local unsigned int * shared_rows, \n");
    source.append("  __local "); source.append(numeric_string); source.append(" * inter_results) \n");
    source.append("{ \n");
    source.append("  uint2 tmp; \n");
    source.append("  "); source.append(numeric_string); source.append(" val; \n");
    source.append("  uint group_start = group_boundaries[get_group_id(0)]; \n");
    source.append("  uint group_end = group_boundaries[get_group_id(0) + 1]; \n");
    source.append("  uint k_end = (group_end > group_start) ? 1 + (group_end - group_start - 1) / get_local_size(0) : 0; \n"); // -1 in order to have correct behavior if group_end - group_start == j * get_local_size(0)

    source.append("  uint local_index = 0; \n");

    // one full segmented reduction pass per column of the result
    source.append("  for (uint result_col = 0; result_col < result_col_size; ++result_col) { \n");
    source.append("    for (uint k = 0; k < k_end; ++k) { \n");
    source.append("      local_index = group_start + k * get_local_size(0) + get_local_id(0); \n");

    source.append("      tmp = (local_index < group_end) ? coords[local_index] : (uint2) 0; \n");
    // the four host-side branches select the generated indexing expression for B's layout
    if (B_transposed && B_row_major)
      source.append("      val = (local_index < group_end) ? elements[local_index] * d_mat[ (d_mat_row_start + result_col * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + tmp.y * d_mat_col_inc ] : 0; \n");
    else if (B_transposed && !B_row_major)
      source.append("      val = (local_index < group_end) ? elements[local_index] * d_mat[ (d_mat_row_start + result_col * d_mat_row_inc) + (d_mat_col_start + tmp.y * d_mat_col_inc) * d_mat_internal_rows ] : 0; \n");
    else if (!B_transposed && B_row_major)
      source.append("      val = (local_index < group_end) ? elements[local_index] * d_mat[ (d_mat_row_start + tmp.y * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + result_col * d_mat_col_inc ] : 0; \n");
    else
      source.append("      val = (local_index < group_end) ? elements[local_index] * d_mat[ (d_mat_row_start + tmp.y * d_mat_row_inc) + (d_mat_col_start + result_col * d_mat_col_inc) * d_mat_internal_rows ] : 0; \n");

    //check for carry from previous loop run:
    source.append("      if (get_local_id(0) == 0 && k > 0) { \n");
    source.append("        if (tmp.x == shared_rows[get_local_size(0)-1]) \n");
    source.append("          val += inter_results[get_local_size(0)-1]; \n");
    source.append("        else \n");
    if (C_row_major)
      source.append("          result[(shared_rows[get_local_size(0)-1] * result_row_inc + result_row_start) * result_internal_cols + result_col_start + result_col * result_col_inc ] = inter_results[get_local_size(0)-1]; \n");
    else
      source.append("          result[(shared_rows[get_local_size(0)-1] * result_row_inc + result_row_start) + (result_col_start + result_col * result_col_inc) * result_internal_rows ] = inter_results[get_local_size(0)-1]; \n");
    source.append("      } \n");

    //segmented parallel reduction begin
    source.append("      barrier(CLK_LOCAL_MEM_FENCE); \n");
    source.append("      shared_rows[get_local_id(0)] = tmp.x; \n");
    source.append("      inter_results[get_local_id(0)] = val; \n");
    source.append("      "); source.append(numeric_string); source.append(" left = 0; \n");
    source.append("      barrier(CLK_LOCAL_MEM_FENCE); \n");

    source.append("      for (unsigned int stride = 1; stride < get_local_size(0); stride *= 2) { \n");
    source.append("        left = (get_local_id(0) >= stride && tmp.x == shared_rows[get_local_id(0) - stride]) ? inter_results[get_local_id(0) - stride] : 0; \n");
    source.append("        barrier(CLK_LOCAL_MEM_FENCE); \n");
    source.append("        inter_results[get_local_id(0)] += left; \n");
    source.append("        barrier(CLK_LOCAL_MEM_FENCE); \n");
    source.append("      } \n");
    //segmented parallel reduction end

    source.append("      if (local_index < group_end && get_local_id(0) < get_local_size(0) - 1 && \n");
    source.append("          shared_rows[get_local_id(0)] != shared_rows[get_local_id(0) + 1]) { \n");
    if (C_row_major)
      source.append("        result[(tmp.x * result_row_inc + result_row_start) * result_internal_cols + result_col_start + result_col * result_col_inc ] = inter_results[get_local_id(0)]; \n");
    else
      source.append("        result[(tmp.x * result_row_inc + result_row_start) + (result_col_start + result_col * result_col_inc) * result_internal_rows ] = inter_results[get_local_id(0)]; \n");
    source.append("      } \n");

    source.append("      barrier(CLK_LOCAL_MEM_FENCE); \n");
    source.append("    } \n"); //for k

    source.append("    if (local_index + 1 == group_end) \n"); //write results of last active entry (this may not necessarily be the case already)
    if (C_row_major)
      source.append("      result[(tmp.x * result_row_inc + result_row_start) * result_internal_cols + result_col_start + result_col * result_col_inc ] = inter_results[get_local_id(0)]; \n");
    else
      source.append("      result[(tmp.x * result_row_inc + result_row_start) + (result_col_start + result_col * result_col_inc) * result_internal_rows ] = inter_results[get_local_id(0)]; \n");
    source.append("  } \n"); //for result_col
    source.append("} \n");

  }
}
+
+template<typename StringT>
+void generate_coordinate_matrix_dense_matrix_multiplication(StringT & source, std::string const & numeric_string)
+{
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, false, false, false);
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, false, false, true);
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, false, true, false);
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, false, true, true);
+
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, true, false, false);
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, true, false, true);
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, true, true, false);
+ detail::generate_coordinate_matrix_dense_matrix_mul(source, numeric_string, true, true, true);
+}
+
/** @brief Generates the OpenCL "row_info_extractor" kernel for a coordinate_matrix (COO format).
  *
  * The runtime 'option' kernel argument selects the per-row quantity written to 'result':
  * 0: inf-norm, 1: 1-norm, 2: 2-norm, 3: diagonal entry.
  * The reduction scheme mirrors generate_coordinate_matrix_vec_mul(): each work-group
  * performs a segmented parallel reduction over its chunk of nonzeros.
  *
  * Fixes relative to the previous revision:
  *  - 'k_end' is a uint, so its empty-group initializer is a plain integer 0 instead of
  *    a zero cast to the floating-point numeric type (which triggered an implicit
  *    float-to-uint conversion and disagreed with the other generators in this file);
  *  - the two 'case 2:' branches that reached 'default:' now end with an explicit
  *    'break;' (behaviorally identical — the fallthrough target only broke — but the
  *    generated kernel no longer contains implicit fallthrough).
  *
  * @param source          String object the generated kernel source is appended to
  * @param numeric_string  Matrix scalar type as a string (e.g. "float" or "double")
  */
template<typename StringT>
void generate_coordinate_matrix_row_info_extractor(StringT & source, std::string const & numeric_string)
{
  source.append("__kernel void row_info_extractor( \n");
  source.append("  __global const uint2 * coords, \n");//(row_index, column_index)
  source.append("  __global const "); source.append(numeric_string); source.append(" * elements, \n");
  source.append("  __global const uint * group_boundaries, \n");
  source.append("  __global "); source.append(numeric_string); source.append(" * result, \n");
  source.append("  unsigned int option, \n");
  source.append("  __local unsigned int * shared_rows, \n");
  source.append("  __local "); source.append(numeric_string); source.append(" * inter_results) \n");
  source.append("{ \n");
  source.append("  uint2 tmp; \n");
  source.append("  "); source.append(numeric_string); source.append(" val; \n");
  source.append("  uint last_index = get_local_size(0) - 1; \n");
  source.append("  uint group_start = group_boundaries[get_group_id(0)]; \n");
  source.append("  uint group_end = group_boundaries[get_group_id(0) + 1]; \n");
  source.append("  uint k_end = (group_end > group_start) ? 1 + (group_end - group_start - 1) / get_local_size(0) : 0; \n"); // -1 in order to have correct behavior if group_end - group_start == j * get_local_size(0)

  source.append("  uint local_index = 0; \n");

  source.append("  for (uint k = 0; k < k_end; ++k) \n");
  source.append("  { \n");
  source.append("    local_index = group_start + k * get_local_size(0) + get_local_id(0); \n");

  // option 3 (diagonal) only contributes entries with row == column
  source.append("    tmp = (local_index < group_end) ? coords[local_index] : (uint2) 0; \n");
  source.append("    val = (local_index < group_end && (option != 3 || tmp.x == tmp.y) ) ? elements[local_index] : 0; \n");

  //check for carry from previous loop run:
  source.append("    if (get_local_id(0) == 0 && k > 0) \n");
  source.append("    { \n");
  source.append("      if (tmp.x == shared_rows[last_index]) \n");
  source.append("      { \n");
  source.append("        switch (option) \n");
  source.append("        { \n");
  source.append("          case 0: \n"); //inf-norm
  source.append("          case 3: \n"); //diagonal entry
  source.append("            val = max(val, fabs(inter_results[last_index])); \n");
  source.append("            break; \n");

  source.append("          case 1: \n"); //1-norm
  source.append("            val = fabs(val) + inter_results[last_index]; \n");
  source.append("            break; \n");

  source.append("          case 2: \n"); //2-norm
  source.append("            val = sqrt(val * val + inter_results[last_index]); \n");
  source.append("            break; \n");

  source.append("          default: \n");
  source.append("            break; \n");
  source.append("        } \n");
  source.append("      } \n");
  source.append("      else \n");
  source.append("      { \n");
  source.append("        switch (option) \n");
  source.append("        { \n");
  source.append("          case 0: \n"); //inf-norm
  source.append("          case 1: \n"); //1-norm
  source.append("          case 3: \n"); //diagonal entry
  source.append("            result[shared_rows[last_index]] = inter_results[last_index]; \n");
  source.append("            break; \n");

  source.append("          case 2: \n"); //2-norm
  source.append("            result[shared_rows[last_index]] = sqrt(inter_results[last_index]); \n");
  source.append("            break; \n");
  source.append("          default: \n");
  source.append("            break; \n");
  source.append("        } \n");
  source.append("      } \n");
  source.append("    } \n");

  //segmented parallel reduction begin
  source.append("    barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("    shared_rows[get_local_id(0)] = tmp.x; \n");
  source.append("    switch (option) \n");
  source.append("    { \n");
  source.append("      case 0: \n");
  source.append("      case 3: \n");
  source.append("        inter_results[get_local_id(0)] = val; \n");
  source.append("        break; \n");
  source.append("      case 1: \n");
  source.append("        inter_results[get_local_id(0)] = fabs(val); \n");
  source.append("        break; \n");
  source.append("      case 2: \n");
  source.append("        inter_results[get_local_id(0)] = val * val; \n");
  source.append("        break; \n");
  source.append("      default: \n");
  source.append("        break; \n");
  source.append("    } \n");
  source.append("    barrier(CLK_LOCAL_MEM_FENCE); \n");

  source.append("    for (unsigned int stride = 1; stride < get_local_size(0); stride *= 2) \n");
  source.append("    { \n");
  source.append("      "); source.append(numeric_string); source.append(" left = (get_local_id(0) >= stride && tmp.x == shared_rows[get_local_id(0) - stride]) ? inter_results[get_local_id(0) - stride] : ("); source.append(numeric_string); source.append(")0; \n");
  source.append("      barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("      switch (option) \n");
  source.append("      { \n");
  source.append("        case 0: \n"); //inf-norm
  source.append("        case 3: \n"); //diagonal entry
  source.append("          inter_results[get_local_id(0)] = max(inter_results[get_local_id(0)], left); \n");
  source.append("          break; \n");

  source.append("        case 1: \n"); //1-norm
  source.append("          inter_results[get_local_id(0)] += left; \n");
  source.append("          break; \n");

  source.append("        case 2: \n"); //2-norm
  source.append("          inter_results[get_local_id(0)] += left; \n");
  source.append("          break; \n");

  source.append("        default: \n");
  source.append("          break; \n");
  source.append("      } \n");
  source.append("      barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("    } \n");
  //segmented parallel reduction end

  // interior row boundary: holder of a row's last entry writes that row's value
  source.append("    if (get_local_id(0) != last_index && \n");
  source.append("        shared_rows[get_local_id(0)] != shared_rows[get_local_id(0) + 1] && \n");
  source.append("        inter_results[get_local_id(0)] != 0) \n");
  source.append("    { \n");
  source.append("      result[tmp.x] = (option == 2) ? sqrt(inter_results[get_local_id(0)]) : inter_results[get_local_id(0)]; \n");
  source.append("    } \n");

  source.append("    barrier(CLK_LOCAL_MEM_FENCE); \n");
  source.append("  } \n"); //for k

  source.append("  if (local_index + 1 == group_end && inter_results[get_local_id(0)] != 0) \n"); //write results of last active entry (this may not necessarily be the case already)
  source.append("    result[tmp.x] = (option == 2) ? sqrt(inter_results[get_local_id(0)]) : inter_results[get_local_id(0)]; \n");
  source.append("} \n");
}
+
+//////////////////////////// Part 2: Main kernel class ////////////////////////////////////
+
+// main kernel class
+/** @brief Main kernel class for generating OpenCL kernels for coordinate_matrix. */
+template<typename NumericT>
+struct coordinate_matrix
+{
+ static std::string program_name()
+ {
+ return viennacl::ocl::type_to_string<NumericT>::apply() + "_coordinate_matrix";
+ }
+
+ static void init(viennacl::ocl::context & ctx)
+ {
+ static std::map<cl_context, bool> init_done;
+ if (!init_done[ctx.handle().get()])
+ {
+ viennacl::ocl::DOUBLE_PRECISION_CHECKER<NumericT>::apply(ctx);
+ std::string numeric_string = viennacl::ocl::type_to_string<NumericT>::apply();
+
+ std::string source;
+ source.reserve(1024);
+
+ viennacl::ocl::append_double_precision_pragma<NumericT>(ctx, source);
+
+ generate_coordinate_matrix_vec_mul(source, numeric_string);
+ generate_coordinate_matrix_dense_matrix_multiplication(source, numeric_string);
+ generate_coordinate_matrix_row_info_extractor(source, numeric_string);
+
+ std::string prog_name = program_name();
+ #ifdef VIENNACL_BUILD_INFO
+ std::cout << "Creating program " << prog_name << std::endl;
+ #endif
+ ctx.add_program(source, prog_name);
+ init_done[ctx.handle().get()] = true;
+ } //if
+ } //init
+};
+
+} // namespace kernels
+} // namespace opencl
+} // namespace linalg
+} // namespace viennacl
+#endif
+
http://git-wip-us.apache.org/repos/asf/mahout/blob/f7c1f802/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ell_matrix.hpp
----------------------------------------------------------------------
diff --git a/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ell_matrix.hpp b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ell_matrix.hpp
new file mode 100644
index 0000000..23c6af9
--- /dev/null
+++ b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ell_matrix.hpp
@@ -0,0 +1,221 @@
+#ifndef VIENNACL_LINALG_OPENCL_KERNELS_ELL_MATRIX_HPP
+#define VIENNACL_LINALG_OPENCL_KERNELS_ELL_MATRIX_HPP
+
+/* =========================================================================
+ Copyright (c) 2010-2016, Institute for Microelectronics,
+ Institute for Analysis and Scientific Computing,
+ TU Wien.
+ Portions of this software are copyright by UChicago Argonne, LLC.
+
+ -----------------
+ ViennaCL - The Vienna Computing Library
+ -----------------
+
+ Project Head: Karl Rupp rupp@iue.tuwien.ac.at
+
+ (A list of authors and contributors can be found in the manual)
+
+ License: MIT (X11), see file LICENSE in the base directory
+============================================================================= */
+
+#include "viennacl/tools/tools.hpp"
+#include "viennacl/ocl/kernel.hpp"
+#include "viennacl/ocl/platform.hpp"
+#include "viennacl/ocl/utils.hpp"
+
+#include "viennacl/linalg/opencl/common.hpp"
+
+/** @file viennacl/linalg/opencl/kernels/ell_matrix.hpp
+ * @brief OpenCL kernel file for ell_matrix operations */
+namespace viennacl
+{
+namespace linalg
+{
+namespace opencl
+{
+namespace kernels
+{
+
+//////////////////////////// Part 1: Kernel generation routines ////////////////////////////////////
+
/** @brief Generates the ELL-format sparse matrix-vector product kernel.
  *
  * Emits either "vec_mul_alpha_beta" (result = alpha * A * x + beta * result) or the
  * plain "vec_mul" (result = A * x), depending on with_alpha_beta. Each work item
  * walks whole rows of the ELL storage (column-index/value pairs padded to
  * items_per_row entries per row, laid out column-wise with stride internal_row_num).
  *
  * @param source          String object the generated kernel source is appended to
  * @param numeric_string  Matrix scalar type as a string (e.g. "float" or "double")
  * @param with_alpha_beta True to generate the scaled variant with alpha/beta arguments
  */
template<typename StringT>
void generate_ell_vec_mul(StringT & source, std::string const & numeric_string, bool with_alpha_beta)
{
  source.append(with_alpha_beta ? "__kernel void vec_mul_alpha_beta( \n"
                                : "__kernel void vec_mul( \n");
  source.append("  __global const unsigned int * coords, \n");
  source.append("  __global const " + numeric_string + " * elements, \n");
  source.append("  __global const " + numeric_string + " * x, \n");
  source.append("  uint4 layout_x, \n");
  if (with_alpha_beta)
    source.append("  " + numeric_string + " alpha, \n");
  source.append("  __global " + numeric_string + " * result, \n");
  source.append("  uint4 layout_result, \n");
  if (with_alpha_beta)
    source.append("  " + numeric_string + " beta, \n");
  source.append("  unsigned int row_num, \n");
  source.append("  unsigned int col_num, \n");
  source.append("  unsigned int internal_row_num, \n");
  source.append("  unsigned int items_per_row, \n");
  source.append("  unsigned int aligned_items_per_row) \n");
  source.append("{ \n");
  source.append("  uint glb_id = get_global_id(0); \n");
  source.append("  uint glb_sz = get_global_size(0); \n");

  // grid-stride style loop over rows; padded (zero) entries are skipped below
  source.append("  for (uint row_id = glb_id; row_id < row_num; row_id += glb_sz) { \n");
  source.append("    " + numeric_string + " sum = 0; \n");

  source.append("    uint offset = row_id; \n");
  source.append("    for (uint item_id = 0; item_id < items_per_row; item_id++, offset += internal_row_num) { \n");
  source.append("      " + numeric_string + " val = elements[offset]; \n");

  source.append("      if (val != 0.0f) { \n");
  source.append("        int col = coords[offset]; \n");
  source.append("        sum += (x[col * layout_x.y + layout_x.x] * val); \n");
  source.append("      } \n");

  source.append("    } \n");

  if (with_alpha_beta)
    source.append("    result[row_id * layout_result.y + layout_result.x] = alpha * sum + ((beta != 0) ? beta * result[row_id * layout_result.y + layout_result.x] : 0); \n");
  else
    source.append("    result[row_id * layout_result.y + layout_result.x] = sum; \n");
  source.append("  } \n");
  source.append("} \n");
}
+
namespace detail
{
  /** @brief Generates one layout variant of the kernel computing C = A * B with A an ell_matrix, B and C dense.
    *
    * @param source          String object the generated kernel source is appended to
    * @param numeric_string  Matrix scalar type as a string (e.g. "float" or "double")
    * @param B_transposed    True if the kernel multiplies with the transpose of B
    * @param B_row_major     True if the dense factor B is stored row-major
    * @param C_row_major     True if the dense result C is stored row-major
    */
  template<typename StringT>
  void generate_ell_matrix_dense_matrix_mul(StringT & source, std::string const & numeric_string,
                                            bool B_transposed, bool B_row_major, bool C_row_major)
  {
    // kernel name encodes the layout combination so all eight variants can coexist in one program
    source.append("__kernel void ");
    source.append(viennacl::linalg::opencl::detail::sparse_dense_matmult_kernel_name(B_transposed, B_row_major, C_row_major));
    source.append("( \n");
    source.append("  __global const unsigned int * sp_mat_coords, \n");
    source.append("  __global const "); source.append(numeric_string); source.append(" * sp_mat_elems, \n");
    source.append("  unsigned int sp_mat_row_num, \n");
    source.append("  unsigned int sp_mat_col_num, \n");
    source.append("  unsigned int sp_mat_internal_row_num, \n");
    source.append("  unsigned int sp_mat_items_per_row, \n");
    source.append("  unsigned int sp_mat_aligned_items_per_row, \n");
    source.append("  __global const "); source.append(numeric_string); source.append("* d_mat, \n");
    source.append("  unsigned int d_mat_row_start, \n");
    source.append("  unsigned int d_mat_col_start, \n");
    source.append("  unsigned int d_mat_row_inc, \n");
    source.append("  unsigned int d_mat_col_inc, \n");
    source.append("  unsigned int d_mat_row_size, \n");
    source.append("  unsigned int d_mat_col_size, \n");
    source.append("  unsigned int d_mat_internal_rows, \n");
    source.append("  unsigned int d_mat_internal_cols, \n");
    source.append("  __global "); source.append(numeric_string); source.append(" * result, \n");
    source.append("  unsigned int result_row_start, \n");
    source.append("  unsigned int result_col_start, \n");
    source.append("  unsigned int result_row_inc, \n");
    source.append("  unsigned int result_col_inc, \n");
    source.append("  unsigned int result_row_size, \n");
    source.append("  unsigned int result_col_size, \n");
    source.append("  unsigned int result_internal_rows, \n");
    source.append("  unsigned int result_internal_cols) { \n");

    source.append("  uint glb_id = get_global_id(0); \n");
    source.append("  uint glb_sz = get_global_size(0); \n");

    // each work item handles one (row, column) entry of the result, flattened into rc
    source.append("  for ( uint rc = glb_id; rc < (sp_mat_row_num * result_col_size); rc += glb_sz) { \n");
    source.append("    uint row = rc % sp_mat_row_num; \n");
    source.append("    uint col = rc / sp_mat_row_num; \n");

    source.append("    uint offset = row; \n");
    source.append("    "); source.append(numeric_string); source.append(" r = ("); source.append(numeric_string); source.append(")0; \n");

    // walk the ELL row (column-wise storage, stride sp_mat_internal_row_num); padded entries are zero
    source.append("    for ( uint k = 0; k < sp_mat_items_per_row; k++, offset += sp_mat_internal_row_num) { \n");

    source.append("      uint j = sp_mat_coords[offset]; \n");
    source.append("      "); source.append(numeric_string); source.append(" x = sp_mat_elems[offset]; \n");

    source.append("      if (x != ("); source.append(numeric_string); source.append(")0) { \n");
    source.append("        "); source.append(numeric_string);
    // the four host-side branches select the generated indexing expression for B's layout
    if (B_transposed && B_row_major)
      source.append(" y = d_mat[ (d_mat_row_start + col * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + j * d_mat_col_inc ]; \n");
    else if (B_transposed && !B_row_major)
      source.append(" y = d_mat[ (d_mat_row_start + col * d_mat_row_inc) + (d_mat_col_start + j * d_mat_col_inc) * d_mat_internal_rows ]; \n");
    else if (!B_transposed && B_row_major)
      source.append(" y = d_mat[ (d_mat_row_start + j * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + col * d_mat_col_inc ]; \n");
    else
      source.append(" y = d_mat[ (d_mat_row_start + j * d_mat_row_inc) + (d_mat_col_start + col * d_mat_col_inc) * d_mat_internal_rows ]; \n");

    source.append("        r += x*y; \n");
    source.append("      } \n");
    source.append("    } \n");

    if (C_row_major)
      source.append("    result[ (result_row_start + row * result_row_inc) * result_internal_cols + result_col_start + col * result_col_inc ] = r; \n");
    else
      source.append("    result[ (result_row_start + row * result_row_inc) + (result_col_start + col * result_col_inc) * result_internal_rows ] = r; \n");
    source.append("  } \n");
    source.append("} \n");

  }
}
+
+template<typename StringT>
+void generate_ell_matrix_dense_matrix_multiplication(StringT & source, std::string const & numeric_string)
+{
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, false, false, false);
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, false, false, true);
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, false, true, false);
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, false, true, true);
+
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, true, false, false);
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, true, false, true);
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, true, true, false);
+ detail::generate_ell_matrix_dense_matrix_mul(source, numeric_string, true, true, true);
+}
+
+//////////////////////////// Part 2: Main kernel class ////////////////////////////////////
+
+// main kernel class
+/** @brief Main kernel class for generating OpenCL kernels for ell_matrix. */
+template<typename NumericT>
+struct ell_matrix
+{
+ static std::string program_name()
+ {
+ return viennacl::ocl::type_to_string<NumericT>::apply() + "_ell_matrix";
+ }
+
+ static void init(viennacl::ocl::context & ctx)
+ {
+ static std::map<cl_context, bool> init_done;
+ if (!init_done[ctx.handle().get()])
+ {
+ viennacl::ocl::DOUBLE_PRECISION_CHECKER<NumericT>::apply(ctx);
+ std::string numeric_string = viennacl::ocl::type_to_string<NumericT>::apply();
+
+ std::string source;
+ source.reserve(1024);
+
+ viennacl::ocl::append_double_precision_pragma<NumericT>(ctx, source);
+
+ // fully parameterized kernels:
+ generate_ell_vec_mul(source, numeric_string, true);
+ generate_ell_vec_mul(source, numeric_string, false);
+ generate_ell_matrix_dense_matrix_multiplication(source, numeric_string);
+
+ std::string prog_name = program_name();
+ #ifdef VIENNACL_BUILD_INFO
+ std::cout << "Creating program " << prog_name << std::endl;
+ #endif
+ ctx.add_program(source, prog_name);
+ init_done[ctx.handle().get()] = true;
+ } //if
+ } //init
+};
+
+} // namespace kernels
+} // namespace opencl
+} // namespace linalg
+} // namespace viennacl
+#endif
+
http://git-wip-us.apache.org/repos/asf/mahout/blob/f7c1f802/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/fft.hpp
----------------------------------------------------------------------
diff --git a/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/fft.hpp b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/fft.hpp
new file mode 100644
index 0000000..1447bd1
--- /dev/null
+++ b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/fft.hpp
@@ -0,0 +1,311 @@
+#ifndef VIENNACL_LINALG_OPENCL_KERNELS_FFT_HPP
+#define VIENNACL_LINALG_OPENCL_KERNELS_FFT_HPP
+
+/* =========================================================================
+ Copyright (c) 2010-2016, Institute for Microelectronics,
+ Institute for Analysis and Scientific Computing,
+ TU Wien.
+ Portions of this software are copyright by UChicago Argonne, LLC.
+
+ -----------------
+ ViennaCL - The Vienna Computing Library
+ -----------------
+
+ Project Head: Karl Rupp rupp@iue.tuwien.ac.at
+
+ (A list of authors and contributors can be found in the manual)
+
+ License: MIT (X11), see file LICENSE in the base directory
+============================================================================= */
+
+#include "viennacl/tools/tools.hpp"
+#include "viennacl/ocl/kernel.hpp"
+#include "viennacl/ocl/platform.hpp"
+#include "viennacl/ocl/utils.hpp"
+
+/** @file viennacl/linalg/opencl/kernels/fft.hpp
+ * @brief OpenCL kernel file for FFT operations */
+namespace viennacl
+{
+namespace linalg
+{
+namespace opencl
+{
+namespace kernels
+{
+
+//////////////////////////// Part 1: Kernel generation routines ////////////////////////////////////
+
+
// Postprocessing phase of the Bluestein algorithm: multiplies the convolution
// result Z elementwise by the chirp factors and writes the final FFT into out.
template<typename StringT>
void generate_fft_bluestein_post(StringT & source, std::string const & numeric_string)
{
  std::string const w  = numeric_string;        // scalar type (float/double)
  std::string const w2 = numeric_string + "2";  // two-component (complex) type

  source.append("__kernel void bluestein_post(__global " + w2 + " *Z, \n");
  source.append(" __global " + w2 + " *out, \n");
  source.append(" unsigned int size) \n");
  source.append("{ \n");
  source.append(" unsigned int glb_id = get_global_id(0); \n");
  source.append(" unsigned int glb_sz = get_global_size(0); \n");
  source.append(" unsigned int double_size = size << 1; \n");
  source.append(" " + w + " sn_a, cs_a; \n");
  source.append(" const " + w + " NUM_PI = 3.14159265358979323846; \n");
  // i*i is reduced mod 2*size to keep the chirp angle pi*i^2/size in range
  source.append(" for (unsigned int i = glb_id; i < size; i += glb_sz) { \n");
  source.append(" unsigned int rm = i * i % (double_size); \n");
  source.append(" " + w + " angle = (" + w + ")rm / size * (-NUM_PI); \n");
  source.append(" sn_a = sincos(angle, &cs_a); \n");
  source.append(" " + w2 + " b_i = (" + w2 + ")(cs_a, sn_a); \n");
  source.append(" out[i] = (" + w2 + ")(Z[i].x * b_i.x - Z[i].y * b_i.y, Z[i].x * b_i.y + Z[i].y * b_i.x); \n");
  source.append(" } \n");
  source.append("} \n");
}
+
// Preprocessing phase of the Bluestein algorithm:
// computes the chirp-modulated input A[i] = input[i] * a_i and the chirp
// sequence B (of extended length ext_size) used in the convolution step.
template<typename StringT>
void generate_fft_bluestein_pre(StringT & source, std::string const & numeric_string)
{
 source.append("__kernel void bluestein_pre(__global "); source.append(numeric_string); source.append("2 *input, \n");
 source.append(" __global "); source.append(numeric_string); source.append("2 *A, \n");
 source.append(" __global "); source.append(numeric_string); source.append("2 *B, \n");
 source.append(" unsigned int size, \n");
 source.append(" unsigned int ext_size \n");
 source.append(" ) { \n");
 source.append(" unsigned int glb_id = get_global_id(0); \n");
 source.append(" unsigned int glb_sz = get_global_size(0); \n");

 source.append(" unsigned int double_size = size << 1; \n");

 source.append(" "); source.append(numeric_string); source.append(" sn_a, cs_a; \n");
 source.append(" const "); source.append(numeric_string); source.append(" NUM_PI = 3.14159265358979323846; \n");

 // i*i is reduced mod 2*size so the chirp angle pi*i^2/size stays in range
 source.append(" for (unsigned int i = glb_id; i < size; i += glb_sz) { \n");
 source.append(" unsigned int rm = i * i % (double_size); \n");
 source.append(" "); source.append(numeric_string); source.append(" angle = ("); source.append(numeric_string); source.append(")rm / size * NUM_PI; \n");

 source.append(" sn_a = sincos(-angle, &cs_a); \n");

 // a_i = exp(-i*angle), b_i = conj(a_i) = exp(+i*angle)
 source.append(" "); source.append(numeric_string); source.append("2 a_i = ("); source.append(numeric_string); source.append("2)(cs_a, sn_a); \n");
 source.append(" "); source.append(numeric_string); source.append("2 b_i = ("); source.append(numeric_string); source.append("2)(cs_a, -sn_a); \n");

 source.append(" A[i] = ("); source.append(numeric_string); source.append("2)(input[i].x * a_i.x - input[i].y * a_i.y, input[i].x * a_i.y + input[i].y * a_i.x); \n");
 source.append(" B[i] = b_i; \n");

 // mirror the chirp into the tail of B (B[ext_size - i] for i > 0);
 // the branch diverges on GPUs — original author note: "very bad
 // instruction, to be fixed"
 source.append(" if (i) \n");
 source.append(" B[ext_size - i] = b_i; \n");
 source.append(" } \n");
 source.append("} \n");
}
+
/** @brief Extract real part of a complex number array (drops the imaginary components). */
template<typename StringT>
void generate_fft_complex_to_real(StringT & source, std::string const & numeric_string)
{
  std::string const kernel =
      "__kernel void complex_to_real(__global " + numeric_string + "2 *in, \n"
      " __global " + numeric_string + " *out, \n"
      " unsigned int size) { \n"
      " for (unsigned int i = get_global_id(0); i < size; i += get_global_size(0)) \n"
      " out[i] = in[i].x; \n"
      "} \n";
  source.append(kernel);
}
+
/** @brief OpenCL kernel generation code for dividing a complex vector by a real scalar, in place. */
template<typename StringT>
void generate_fft_div_vec_scalar(StringT & source, std::string const & numeric_string)
{
  std::string const kernel =
      "__kernel void fft_div_vec_scalar(__global " + numeric_string + "2 *input1, \n"
      " unsigned int size, \n"
      " " + numeric_string + " factor) { \n"
      " for (unsigned int i = get_global_id(0); i < size; i += get_global_size(0)) \n"
      " input1[i] /= factor; \n"
      "} \n";
  source.append(kernel);
}
+
/** @brief Elementwise product of two complex vectors: output[i] = input1[i] * input2[i]. */
template<typename StringT>
void generate_fft_mult_vec(StringT & source, std::string const & numeric_string)
{
  std::string const cpx = numeric_string + "2";  // complex type
  // (a + ib)(c + id) = (ac - bd) + i(ad + bc)
  std::string const kernel =
      "__kernel void fft_mult_vec(__global const " + cpx + " *input1, \n"
      " __global const " + cpx + " *input2, \n"
      " __global " + cpx + " *output, \n"
      " unsigned int size) { \n"
      " for (unsigned int i = get_global_id(0); i < size; i += get_global_size(0)) { \n"
      " " + cpx + " in1 = input1[i]; \n"
      " " + cpx + " in2 = input2[i]; \n"
      " output[i] = (" + cpx + ")(in1.x * in2.x - in1.y * in2.y, in1.x * in2.y + in1.y * in2.x); \n"
      " } \n"
      "} \n";
  source.append(kernel);
}
+
/** @brief Embeds a real-valued vector into a complex one (imaginary parts set to zero). */
template<typename StringT>
void generate_fft_real_to_complex(StringT & source, std::string const & numeric_string)
{
  std::string const cpx = numeric_string + "2";
  std::string const kernel =
      "__kernel void real_to_complex(__global " + numeric_string + " *in, \n"
      " __global " + cpx + " *out, \n"
      " unsigned int size) { \n"
      " for (unsigned int i = get_global_id(0); i < size; i += get_global_size(0)) { \n"
      " " + cpx + " val = 0; \n"
      " val.x = in[i]; \n"
      " out[i] = val; \n"
      " } \n"
      "} \n";
  source.append(kernel);
}
+
/** @brief Reverses the entries in a vector, in place (swaps vec[i] and vec[size-1-i]). */
template<typename StringT>
void generate_fft_reverse_inplace(StringT & source, std::string const & numeric_string)
{
  std::string const t = numeric_string;
  std::string const kernel =
      "__kernel void reverse_inplace(__global " + t + " *vec, uint size) { \n"
      " for (uint i = get_global_id(0); i < (size >> 1); i+=get_global_size(0)) { \n"
      " " + t + " val1 = vec[i]; \n"
      " " + t + " val2 = vec[size - i - 1]; \n"
      " vec[i] = val2; \n"
      " vec[size - i - 1] = val1; \n"
      " } \n"
      "} \n";
  source.append(kernel);
}
+
/** @brief Simplistic out-of-place matrix transpose of a row_num x col_num complex matrix. */
template<typename StringT>
void generate_fft_transpose(StringT & source, std::string const & numeric_string)
{
  std::string const cpx = numeric_string + "2";
  std::string const kernel =
      "__kernel void transpose(__global " + cpx + " *input, \n"
      " __global " + cpx + " *output, \n"
      " unsigned int row_num, \n"
      " unsigned int col_num) { \n"
      " unsigned int size = row_num * col_num; \n"
      " for (unsigned int i = get_global_id(0); i < size; i+= get_global_size(0)) { \n"
      " unsigned int row = i / col_num; \n"
      " unsigned int col = i - row*col_num; \n"
      " unsigned int new_pos = col * row_num + row; \n"
      " output[new_pos] = input[i]; \n"
      " } \n"
      "} \n";
  source.append(kernel);
}
+
/** @brief Simplistic inplace matrix transpose function.
 *
 * Swaps element pairs (i, new_pos) of a row_num x col_num matrix of complex
 * values; the guard i < new_pos ensures each pair is swapped exactly once.
 *
 * NOTE(review): pairwise index swapping is only a complete transpose for
 * square matrices (row_num == col_num); for rectangular shapes the element
 * at new_pos belongs to a longer permutation cycle — confirm callers only
 * use this for square (or separately handled) layouts.
 */
template<typename StringT>
void generate_fft_transpose_inplace(StringT & source, std::string const & numeric_string)
{
 source.append("__kernel void transpose_inplace(__global "); source.append(numeric_string); source.append("2* input, \n");
 source.append(" unsigned int row_num, \n");
 source.append(" unsigned int col_num) { \n");
 source.append(" unsigned int size = row_num * col_num; \n");
 source.append(" for (unsigned int i = get_global_id(0); i < size; i+= get_global_size(0)) { \n");
 source.append(" unsigned int row = i / col_num; \n");
 source.append(" unsigned int col = i - row*col_num; \n");

 source.append(" unsigned int new_pos = col * row_num + row; \n");

 // swap only once per pair; diagonal entries (i == new_pos) stay untouched
 source.append(" if (i < new_pos) { \n");
 source.append(" "); source.append(numeric_string); source.append("2 val = input[i]; \n");
 source.append(" input[i] = input[new_pos]; \n");
 source.append(" input[new_pos] = val; \n");
 source.append(" } \n");
 source.append(" } \n");
 source.append("} \n");
}
+
/** @brief Computes the matrix vector product with a Vandermonde matrix.
 *
 * Row i of the implicit Vandermonde matrix holds the powers vander[i]^0,
 * vander[i]^1, ..., hence result[i] = sum_j vander[i]^j * vector[j].
 */
template<typename StringT>
void generate_fft_vandermonde_prod(StringT & source, std::string const & numeric_string)
{
  std::string const t = numeric_string;
  std::string const kernel =
      "__kernel void vandermonde_prod(__global " + t + " *vander, \n"
      " __global " + t + " *vector, \n"
      " __global " + t + " *result, \n"
      " uint size) { \n"
      " for (uint i = get_global_id(0); i < size; i+= get_global_size(0)) { \n"
      " " + t + " mul = vander[i]; \n"
      " " + t + " pwr = 1; \n"
      " " + t + " val = 0; \n"
      " for (uint j = 0; j < size; j++) { \n"
      " val = val + pwr * vector[j]; \n"
      " pwr *= mul; \n"
      " } \n"
      " result[i] = val; \n"
      " } \n"
      "} \n";
  source.append(kernel);
}
+
/** @brief Zero two complex vectors in a single kernel (avoids one kernel launch overhead). */
template<typename StringT>
void generate_fft_zero2(StringT & source, std::string const & numeric_string)
{
  std::string const cpx = numeric_string + "2";
  std::string const kernel =
      "__kernel void zero2(__global " + cpx + " *input1, \n"
      " __global " + cpx + " *input2, \n"
      " unsigned int size) { \n"
      " for (unsigned int i = get_global_id(0); i < size; i += get_global_size(0)) { \n"
      " input1[i] = 0; \n"
      " input2[i] = 0; \n"
      " } \n"
      "} \n";
  source.append(kernel);
}
+
+//////////////////////////// Part 2: Main kernel class ////////////////////////////////////
+
+// main kernel class
+/** @brief Main kernel class for generating OpenCL kernels for the fast Fourier transform. */
+template<typename NumericT>
+struct fft
+{
+ static std::string program_name()
+ {
+ return viennacl::ocl::type_to_string<NumericT>::apply() + "_fft";
+ }
+
+ static void init(viennacl::ocl::context & ctx)
+ {
+ static std::map<cl_context, bool> init_done;
+ if (!init_done[ctx.handle().get()])
+ {
+ viennacl::ocl::DOUBLE_PRECISION_CHECKER<NumericT>::apply(ctx);
+ std::string numeric_string = viennacl::ocl::type_to_string<NumericT>::apply();
+
+ std::string source;
+ source.reserve(8192);
+
+ viennacl::ocl::append_double_precision_pragma<NumericT>(ctx, source);
+
+ // unary operations
+ if (numeric_string == "float" || numeric_string == "double")
+ {
+ generate_fft_bluestein_post(source, numeric_string);
+ generate_fft_bluestein_pre(source, numeric_string);
+ generate_fft_complex_to_real(source, numeric_string);
+ generate_fft_div_vec_scalar(source, numeric_string);
+ generate_fft_mult_vec(source, numeric_string);
+ generate_fft_real_to_complex(source, numeric_string);
+ generate_fft_reverse_inplace(source, numeric_string);
+ generate_fft_transpose(source, numeric_string);
+ generate_fft_transpose_inplace(source, numeric_string);
+ generate_fft_vandermonde_prod(source, numeric_string);
+ generate_fft_zero2(source, numeric_string);
+ }
+
+ std::string prog_name = program_name();
+ #ifdef VIENNACL_BUILD_INFO
+ std::cout << "Creating program " << prog_name << std::endl;
+ #endif
+ ctx.add_program(source, prog_name);
+ init_done[ctx.handle().get()] = true;
+ } //if
+ } //init
+};
+
+} // namespace kernels
+} // namespace opencl
+} // namespace linalg
+} // namespace viennacl
+#endif
+
http://git-wip-us.apache.org/repos/asf/mahout/blob/f7c1f802/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/hyb_matrix.hpp
----------------------------------------------------------------------
diff --git a/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/hyb_matrix.hpp b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/hyb_matrix.hpp
new file mode 100644
index 0000000..83d1411
--- /dev/null
+++ b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/hyb_matrix.hpp
@@ -0,0 +1,240 @@
+#ifndef VIENNACL_LINALG_OPENCL_KERNELS_HYB_MATRIX_HPP
+#define VIENNACL_LINALG_OPENCL_KERNELS_HYB_MATRIX_HPP
+
+/* =========================================================================
+ Copyright (c) 2010-2016, Institute for Microelectronics,
+ Institute for Analysis and Scientific Computing,
+ TU Wien.
+ Portions of this software are copyright by UChicago Argonne, LLC.
+
+ -----------------
+ ViennaCL - The Vienna Computing Library
+ -----------------
+
+ Project Head: Karl Rupp rupp@iue.tuwien.ac.at
+
+ (A list of authors and contributors can be found in the manual)
+
+ License: MIT (X11), see file LICENSE in the base directory
+============================================================================= */
+
+#include "viennacl/tools/tools.hpp"
+#include "viennacl/ocl/kernel.hpp"
+#include "viennacl/ocl/platform.hpp"
+#include "viennacl/ocl/utils.hpp"
+
+#include "viennacl/linalg/opencl/common.hpp"
+
+/** @file viennacl/linalg/opencl/kernels/hyb_matrix.hpp
+ * @brief OpenCL kernel file for hyb_matrix operations */
+namespace viennacl
+{
+namespace linalg
+{
+namespace opencl
+{
+namespace kernels
+{
+
+//////////////////////////// Part 1: Kernel generation routines ////////////////////////////////////
+
/** @brief Generates the OpenCL source for a HYB-format sparse matrix-vector product.
 *
 * The HYB format stores the matrix as an ELL part (ell_coords / ell_elements,
 * column-major with stride internal_row_num) plus a CSR part
 * (csr_rows / csr_cols / csr_elements) holding the per-row overflow entries.
 *
 * @param source           String the kernel source is appended to
 * @param numeric_string   Name of the scalar type (e.g. "float", "double")
 * @param with_alpha_beta  If true, emits 'vec_mul_alpha_beta' computing
 *                         result = alpha * A * x + beta * result; otherwise
 *                         emits 'vec_mul' computing result = A * x
 */
template<typename StringT>
void generate_hyb_vec_mul(StringT & source, std::string const & numeric_string, bool with_alpha_beta)
{
 if (with_alpha_beta)
 source.append("__kernel void vec_mul_alpha_beta( \n");
 else
 source.append("__kernel void vec_mul( \n");
 source.append(" const __global int* ell_coords, \n");
 source.append(" const __global "); source.append(numeric_string); source.append("* ell_elements, \n");
 source.append(" const __global uint* csr_rows, \n");
 source.append(" const __global uint* csr_cols, \n");
 source.append(" const __global "); source.append(numeric_string); source.append("* csr_elements, \n");
 source.append(" const __global "); source.append(numeric_string); source.append(" * x, \n");
 source.append(" uint4 layout_x, \n");
 if (with_alpha_beta) { source.append(" "); source.append(numeric_string); source.append(" alpha, \n"); }
 source.append(" __global "); source.append(numeric_string); source.append(" * result, \n");
 source.append(" uint4 layout_result, \n");
 if (with_alpha_beta) { source.append(" "); source.append(numeric_string); source.append(" beta, \n"); }
 source.append(" unsigned int row_num, \n");
 source.append(" unsigned int internal_row_num, \n");
 source.append(" unsigned int items_per_row, \n");
 source.append(" unsigned int aligned_items_per_row) \n");
 source.append("{ \n");
 source.append(" uint glb_id = get_global_id(0); \n");
 source.append(" uint glb_sz = get_global_size(0); \n");

 // one matrix row per iteration, rows distributed over all work items
 source.append(" for (uint row_id = glb_id; row_id < row_num; row_id += glb_sz) { \n");
 source.append(" "); source.append(numeric_string); source.append(" sum = 0; \n");

 // ELL part: fixed items_per_row entries, zero-valued entries are padding
 source.append(" uint offset = row_id; \n");
 source.append(" for (uint item_id = 0; item_id < items_per_row; item_id++, offset += internal_row_num) { \n");
 source.append(" "); source.append(numeric_string); source.append(" val = ell_elements[offset]; \n");

 source.append(" if (val != ("); source.append(numeric_string); source.append(")0) { \n");
 source.append(" int col = ell_coords[offset]; \n");
 source.append(" sum += (x[col * layout_x.y + layout_x.x] * val); \n");
 source.append(" } \n");

 source.append(" } \n");

 // CSR part: overflow entries of this row
 source.append(" uint col_begin = csr_rows[row_id]; \n");
 source.append(" uint col_end = csr_rows[row_id + 1]; \n");

 source.append(" for (uint item_id = col_begin; item_id < col_end; item_id++) { \n");
 source.append(" sum += (x[csr_cols[item_id] * layout_x.y + layout_x.x] * csr_elements[item_id]); \n");
 source.append(" } \n");

 if (with_alpha_beta)
 source.append(" result[row_id * layout_result.y + layout_result.x] = alpha * sum + ((beta != 0) ? beta * result[row_id * layout_result.y + layout_result.x] : 0); \n");
 else
 source.append(" result[row_id * layout_result.y + layout_result.x] = sum; \n");
 source.append(" } \n");
 source.append("} \n");
}
+
namespace detail
{
 /** @brief Generates an OpenCL kernel for the product of a HYB-format sparse
 * matrix A with a dense matrix B (optionally B transposed), writing a
 * dense result C.
 *
 * The emitted kernel name encodes the layout combination via
 * sparse_dense_matmult_kernel_name(). B and C are described by
 * start/increment/size/internal-dimension parameters, supporting
 * submatrix views.
 *
 * @param B_transposed  If true, multiplies with the transpose of B
 * @param B_row_major   Memory layout of B (row- vs. column-major)
 * @param C_row_major   Memory layout of the result C
 */
 template<typename StringT>
 void generate_hyb_matrix_dense_matrix_mul(StringT & source, std::string const & numeric_string,
 bool B_transposed, bool B_row_major, bool C_row_major)
 {
 source.append("__kernel void ");
 source.append(viennacl::linalg::opencl::detail::sparse_dense_matmult_kernel_name(B_transposed, B_row_major, C_row_major));
 source.append("( \n");
 source.append(" const __global int* ell_coords, \n");
 source.append(" const __global "); source.append(numeric_string); source.append("* ell_elements, \n");
 source.append(" const __global uint* csr_rows, \n");
 source.append(" const __global uint* csr_cols, \n");
 source.append(" const __global "); source.append(numeric_string); source.append("* csr_elements, \n");
 source.append(" unsigned int row_num, \n");
 source.append(" unsigned int internal_row_num, \n");
 source.append(" unsigned int items_per_row, \n");
 source.append(" unsigned int aligned_items_per_row, \n");
 source.append(" __global const "); source.append(numeric_string); source.append("* d_mat, \n");
 source.append(" unsigned int d_mat_row_start, \n");
 source.append(" unsigned int d_mat_col_start, \n");
 source.append(" unsigned int d_mat_row_inc, \n");
 source.append(" unsigned int d_mat_col_inc, \n");
 source.append(" unsigned int d_mat_row_size, \n");
 source.append(" unsigned int d_mat_col_size, \n");
 source.append(" unsigned int d_mat_internal_rows, \n");
 source.append(" unsigned int d_mat_internal_cols, \n");
 source.append(" __global "); source.append(numeric_string); source.append(" * result, \n");
 source.append(" unsigned int result_row_start, \n");
 source.append(" unsigned int result_col_start, \n");
 source.append(" unsigned int result_row_inc, \n");
 source.append(" unsigned int result_col_inc, \n");
 source.append(" unsigned int result_row_size, \n");
 source.append(" unsigned int result_col_size, \n");
 source.append(" unsigned int result_internal_rows, \n");
 source.append(" unsigned int result_internal_cols) { \n");

 source.append(" uint glb_id = get_global_id(0); \n");
 source.append(" uint glb_sz = get_global_size(0); \n");

 // outer loop over result columns; rows are distributed over the work items
 source.append(" for (uint result_col = 0; result_col < result_col_size; ++result_col) { \n");
 source.append(" for (uint row_id = glb_id; row_id < row_num; row_id += glb_sz) { \n");
 source.append(" "); source.append(numeric_string); source.append(" sum = 0; \n");

 // ELL part (column-major with stride internal_row_num; zeros are padding)
 source.append(" uint offset = row_id; \n");
 source.append(" for (uint item_id = 0; item_id < items_per_row; item_id++, offset += internal_row_num) { \n");
 source.append(" "); source.append(numeric_string); source.append(" val = ell_elements[offset]; \n");

 source.append(" if (val != ("); source.append(numeric_string); source.append(")0) { \n");
 source.append(" int col = ell_coords[offset]; \n");
 // the d_mat index expression is fixed at generation time by the layout flags
 if (B_transposed && B_row_major)
 source.append(" sum += d_mat[ (d_mat_row_start + result_col * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + col * d_mat_col_inc ] * val; \n");
 else if (B_transposed && !B_row_major)
 source.append(" sum += d_mat[ (d_mat_row_start + result_col * d_mat_row_inc) + (d_mat_col_start + col * d_mat_col_inc) * d_mat_internal_rows ] * val; \n");
 else if (!B_transposed && B_row_major)
 source.append(" sum += d_mat[ (d_mat_row_start + col * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + result_col * d_mat_col_inc ] * val; \n");
 else
 source.append(" sum += d_mat[ (d_mat_row_start + col * d_mat_row_inc) + (d_mat_col_start + result_col * d_mat_col_inc) * d_mat_internal_rows ] * val; \n");
 source.append(" } \n");

 source.append(" } \n");

 // CSR part holding the rows' overflow entries
 source.append(" uint col_begin = csr_rows[row_id]; \n");
 source.append(" uint col_end = csr_rows[row_id + 1]; \n");

 source.append(" for (uint item_id = col_begin; item_id < col_end; item_id++) { \n");
 if (B_transposed && B_row_major)
 source.append(" sum += d_mat[ (d_mat_row_start + result_col * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + csr_cols[item_id] * d_mat_col_inc ] * csr_elements[item_id]; \n");
 else if (B_transposed && !B_row_major)
 source.append(" sum += d_mat[ (d_mat_row_start + result_col * d_mat_row_inc) + (d_mat_col_start + csr_cols[item_id] * d_mat_col_inc) * d_mat_internal_rows ] * csr_elements[item_id]; \n");
 else if (!B_transposed && B_row_major)
 source.append(" sum += d_mat[ (d_mat_row_start + csr_cols[item_id] * d_mat_row_inc) * d_mat_internal_cols + d_mat_col_start + result_col * d_mat_col_inc ] * csr_elements[item_id]; \n");
 else
 source.append(" sum += d_mat[ (d_mat_row_start + csr_cols[item_id] * d_mat_row_inc) + (d_mat_col_start + result_col * d_mat_col_inc) * d_mat_internal_rows ] * csr_elements[item_id]; \n");
 source.append(" } \n");

 if (C_row_major)
 source.append(" result[ (result_row_start + row_id * result_row_inc) * result_internal_cols + result_col_start + result_col * result_col_inc ] = sum; \n");
 else
 source.append(" result[ (result_row_start + row_id * result_row_inc) + (result_col_start + result_col * result_col_inc) * result_internal_rows ] = sum; \n");
 source.append(" } \n");
 source.append(" } \n");
 source.append("} \n");
 }
}
+
+template<typename StringT>
+void generate_hyb_matrix_dense_matrix_multiplication(StringT & source, std::string const & numeric_string)
+{
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, false, false, false);
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, false, false, true);
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, false, true, false);
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, false, true, true);
+
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, true, false, false);
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, true, false, true);
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, true, true, false);
+ detail::generate_hyb_matrix_dense_matrix_mul(source, numeric_string, true, true, true);
+}
+
+//////////////////////////// Part 2: Main kernel class ////////////////////////////////////
+
+// main kernel class
+/** @brief Main kernel class for generating OpenCL kernels for hyb_matrix. */
+template<typename NumericT>
+struct hyb_matrix
+{
+ static std::string program_name()
+ {
+ return viennacl::ocl::type_to_string<NumericT>::apply() + "_hyb_matrix";
+ }
+
+ static void init(viennacl::ocl::context & ctx)
+ {
+ static std::map<cl_context, bool> init_done;
+ if (!init_done[ctx.handle().get()])
+ {
+ viennacl::ocl::DOUBLE_PRECISION_CHECKER<NumericT>::apply(ctx);
+ std::string numeric_string = viennacl::ocl::type_to_string<NumericT>::apply();
+
+ std::string source;
+ source.reserve(1024);
+
+ viennacl::ocl::append_double_precision_pragma<NumericT>(ctx, source);
+
+ generate_hyb_vec_mul(source, numeric_string, true);
+ generate_hyb_vec_mul(source, numeric_string, false);
+ generate_hyb_matrix_dense_matrix_multiplication(source, numeric_string);
+
+ std::string prog_name = program_name();
+ #ifdef VIENNACL_BUILD_INFO
+ std::cout << "Creating program " << prog_name << std::endl;
+ #endif
+ ctx.add_program(source, prog_name);
+ init_done[ctx.handle().get()] = true;
+ } //if
+ } //init
+};
+
+} // namespace kernels
+} // namespace opencl
+} // namespace linalg
+} // namespace viennacl
+#endif
+
http://git-wip-us.apache.org/repos/asf/mahout/blob/f7c1f802/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ilu.hpp
----------------------------------------------------------------------
diff --git a/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ilu.hpp b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ilu.hpp
new file mode 100644
index 0000000..bef778c
--- /dev/null
+++ b/native-viennaCL/src/main/cpp/viennacl/linalg/opencl/kernels/ilu.hpp
@@ -0,0 +1,505 @@
+#ifndef VIENNACL_LINALG_OPENCL_KERNELS_ILU_HPP
+#define VIENNACL_LINALG_OPENCL_KERNELS_ILU_HPP
+
+/* =========================================================================
+ Copyright (c) 2010-2016, Institute for Microelectronics,
+ Institute for Analysis and Scientific Computing,
+ TU Wien.
+ Portions of this software are copyright by UChicago Argonne, LLC.
+
+ -----------------
+ ViennaCL - The Vienna Computing Library
+ -----------------
+
+ Project Head: Karl Rupp rupp@iue.tuwien.ac.at
+
+ (A list of authors and contributors can be found in the manual)
+
+ License: MIT (X11), see file LICENSE in the base directory
+============================================================================= */
+
+#include "viennacl/tools/tools.hpp"
+#include "viennacl/ocl/kernel.hpp"
+#include "viennacl/ocl/platform.hpp"
+#include "viennacl/ocl/utils.hpp"
+
+/** @file viennacl/linalg/opencl/kernels/ilu.hpp
+ * @brief OpenCL kernel file for incomplete LU (ILU) and incomplete Cholesky (ICC) factorization preconditioners */
+namespace viennacl
+{
+namespace linalg
+{
+namespace opencl
+{
+namespace kernels
+{
+template<typename StringT>
+void generate_ilu_level_scheduling_substitute(StringT & source, std::string const & numeric_string)
+{
+ source.append("__kernel void level_scheduling_substitute( \n");
+ source.append(" __global const unsigned int * row_index_array, \n");
+ source.append(" __global const unsigned int * row_indices, \n");
+ source.append(" __global const unsigned int * column_indices, \n");
+ source.append(" __global const "); source.append(numeric_string); source.append(" * elements, \n");
+ source.append(" __global "); source.append(numeric_string); source.append(" * vec, \n");
+ source.append(" unsigned int size) \n");
+ source.append("{ \n");
+ source.append(" for (unsigned int row = get_global_id(0); \n");
+ source.append(" row < size; \n");
+ source.append(" row += get_global_size(0)) \n");
+ source.append(" { \n");
+ source.append(" unsigned int eq_row = row_index_array[row]; \n");
+ source.append(" "); source.append(numeric_string); source.append(" vec_entry = vec[eq_row]; \n");
+ source.append(" unsigned int row_end = row_indices[row+1]; \n");
+
+ source.append(" for (unsigned int j = row_indices[row]; j < row_end; ++j) \n");
+ source.append(" vec_entry -= vec[column_indices[j]] * elements[j]; \n");
+
+ source.append(" vec[eq_row] = vec_entry; \n");
+ source.append(" } \n");
+ source.append("} \n");
+}
+
+///////////// ICC ///////////////
+
+
/** @brief Generates pass 1 of the lower-triangular extraction for ICC: counts, for each row of A,
  *        the number of entries with column index <= row (i.e. the row sizes of L, diagonal included).
  *
  * Note: the kernel writes raw per-row counts into L_row_indices; turning them into CSR
  * offsets (exclusive scan) is up to the caller.
  */
template<typename StringT>
void generate_icc_extract_L_1(StringT & source)
{
  // No scalar type appears in this kernel, so the whole body is one
  // compile-time-concatenated literal:
  source.append("__kernel void extract_L_1( \n"
                " __global unsigned int const *A_row_indices, \n"
                " __global unsigned int const *A_col_indices, \n"
                " unsigned int A_size1, \n"
                " __global unsigned int *L_row_indices) { \n"
                " for (unsigned int row = get_global_id(0); \n"
                " row < A_size1; \n"
                " row += get_global_size(0)) \n"
                " { \n"
                " unsigned int row_begin = A_row_indices[row]; \n"
                " unsigned int row_end = A_row_indices[row+1]; \n"
                " unsigned int num_entries_L = 0; \n"
                " for (unsigned int j=row_begin; j<row_end; ++j) { \n"
                " unsigned int col = A_col_indices[j]; \n"
                " if (col <= row) ++num_entries_L; \n"
                " } \n"
                " L_row_indices[row] = num_entries_L; \n"
                " } \n"
                "} \n");
}
+
/** @brief Generates pass 2 of the lower-triangular extraction for ICC: copies the column
  *        indices and values of all entries of A with column <= row into L.
  *
  * Assumes L_row_indices already holds the CSR row-start offsets derived from the counts
  * produced by extract_L_1 (presumably via an exclusive scan done elsewhere -- confirm in caller).
  *
  * @param source          String the generated kernel code is appended to.
  * @param numeric_string  OpenCL scalar type token ("float" or "double").
  */
template<typename StringT>
void generate_icc_extract_L_2(StringT & source, std::string const & numeric_string)
{
  source.append("__kernel void extract_L_2( \n");
  source.append(" __global unsigned int const *A_row_indices, \n");
  source.append(" __global unsigned int const *A_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" const *A_elements, \n");
  source.append(" unsigned int A_size1, \n");
  source.append(" __global unsigned int const *L_row_indices, \n");
  source.append(" __global unsigned int *L_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" *L_elements) { \n");

  // each work item strides over the rows of A:
  source.append(" for (unsigned int row = get_global_id(0); \n");
  source.append(" row < A_size1; \n");
  source.append(" row += get_global_size(0)) \n");
  source.append(" { \n");
  source.append(" unsigned int row_begin = A_row_indices[row]; \n");
  source.append(" unsigned int row_end = A_row_indices[row+1]; \n");

  // index_L is the write cursor into L for this row:
  source.append(" unsigned int index_L = L_row_indices[row]; \n");
  source.append(" for (unsigned int j=row_begin; j<row_end; ++j) { \n");
  source.append(" unsigned int col = A_col_indices[j]; \n");
  source.append(" "); source.append(numeric_string); source.append(" value = A_elements[j]; \n");

  // copy lower-triangular entries (diagonal included):
  source.append(" if (col <= row) { \n");
  source.append(" L_col_indices[index_L] = col; \n");
  source.append(" L_elements[index_L] = value; \n");
  source.append(" ++index_L; \n");
  source.append(" } \n");
  source.append(" } \n");

  source.append(" } \n");
  source.append("} \n");
}
+
+
/** @brief Generates the kernel performing one nonlinear sweep for the incomplete Cholesky
  *        factor L (Chow/Patel-style fine-grained parallel factorization, per the kernel
  *        name -- confirm against the host-side caller).
  *
  * All reads of factor values go through L_backup (the previous iterate) while L_elements
  * is written, i.e. the sweep is a Jacobi-type fixed-point update. aij_L holds the entries
  * of the system matrix A at the sparsity pattern of L.
  */
template<typename StringT>
void generate_icc_chow_patel_sweep_kernel(StringT & source, std::string const & numeric_string)
{
  source.append("__kernel void icc_chow_patel_sweep_kernel( \n");
  source.append(" __global unsigned int const *L_row_indices, \n");
  source.append(" __global unsigned int const *L_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" *L_elements, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" const *L_backup, \n");
  source.append(" unsigned int L_size1, \n");

  source.append(" __global "); source.append(numeric_string); source.append(" const *aij_L) { \n");

  // each work item strides over the rows of L:
  source.append(" for (unsigned int row = get_global_id(0); \n");
  source.append(" row < L_size1; \n");
  source.append(" row += get_global_size(0)) \n");
  source.append(" { \n");

  //
  // Update L:
  //
  source.append(" unsigned int row_Li_start = L_row_indices[row]; \n");
  source.append(" unsigned int row_Li_end = L_row_indices[row + 1]; \n");

  source.append(" for (unsigned int i = row_Li_start; i < row_Li_end; ++i) { \n");
  source.append(" unsigned int col = L_col_indices[i]; \n");

  // row 'col' of L is merged against row 'row' below:
  source.append(" unsigned int row_Lj_start = L_row_indices[col]; \n");
  source.append(" unsigned int row_Lj_end = L_row_indices[col + 1]; \n");

  // NOTE(review): the first column index is read without a bounds guard (cf. the
  // guarded ilu_chow_patel_sweep_kernel below); this relies on every row of L being
  // non-empty, presumably because each row contains its diagonal -- confirm.
  source.append(" unsigned int index_Lj = row_Lj_start; \n");
  source.append(" unsigned int col_Lj = L_col_indices[index_Lj]; \n");

  // s = a_ij - sum_k l_ik * l_jk, computed as a sparse dot product by merging the
  // two rows on their (ascending) column indices:
  source.append(" "); source.append(numeric_string); source.append(" s = aij_L[i]; \n");
  source.append(" for (unsigned int index_Li = row_Li_start; index_Li < i; ++index_Li) { \n");
  source.append(" unsigned int col_Li = L_col_indices[index_Li]; \n");

  source.append(" while (col_Lj < col_Li) { \n");
  source.append(" ++index_Lj; \n");
  source.append(" col_Lj = L_col_indices[index_Lj]; \n");
  source.append(" } \n");

  source.append(" if (col_Lj == col_Li) \n");
  source.append(" s -= L_backup[index_Li] * L_backup[index_Lj]; \n");
  source.append(" } \n");

  // update l_ij: diagonal entries get sqrt(s); off-diagonal entries are divided by the
  // last stored entry of row 'col' (its diagonal, assuming sorted columns -- confirm):
  source.append(" L_elements[i] = (row == col) ? sqrt(s) : (s / L_backup[row_Lj_end - 1]); \n");
  source.append(" } \n");

  source.append(" } \n");
  source.append("} \n");
}
+
+
+///////////// ILU ///////////////
+
/** @brief Generates pass 1 of the L/U splitting for ILU: counts, per row of A, the entries
  *        belonging to L (col <= row) and to U (col >= row).
  *
  * The diagonal satisfies both conditions and is therefore counted for L and U alike.
  * Raw counts (not CSR offsets) are written to L_row_indices and U_row_indices.
  */
template<typename StringT>
void generate_ilu_extract_LU_1(StringT & source)
{
  // No scalar type token needed; emit everything as one concatenated literal:
  source.append("__kernel void extract_LU_1( \n"
                " __global unsigned int const *A_row_indices, \n"
                " __global unsigned int const *A_col_indices, \n"
                " unsigned int A_size1, \n"
                " __global unsigned int *L_row_indices, \n"
                " __global unsigned int *U_row_indices) { \n"
                " for (unsigned int row = get_global_id(0); \n"
                " row < A_size1; \n"
                " row += get_global_size(0)) \n"
                " { \n"
                " unsigned int row_begin = A_row_indices[row]; \n"
                " unsigned int row_end = A_row_indices[row+1]; \n"
                " unsigned int num_entries_L = 0; \n"
                " unsigned int num_entries_U = 0; \n"
                " for (unsigned int j=row_begin; j<row_end; ++j) { \n"
                " unsigned int col = A_col_indices[j]; \n"
                " if (col <= row) ++num_entries_L; \n"
                " if (col >= row) ++num_entries_U; \n"
                " } \n"
                " L_row_indices[row] = num_entries_L; \n"
                " U_row_indices[row] = num_entries_U; \n"
                " } \n"
                "} \n");
}
+
/** @brief Generates pass 2 of the L/U splitting for ILU: copies column indices and values of A
  *        into L (col <= row) and U (col >= row); the diagonal is written to both factors.
  *
  * Assumes L_row_indices and U_row_indices already hold CSR row-start offsets derived from
  * the counts of extract_LU_1 (presumably via an exclusive scan elsewhere -- confirm in caller).
  *
  * @param source          String the generated kernel code is appended to.
  * @param numeric_string  OpenCL scalar type token ("float" or "double").
  */
template<typename StringT>
void generate_ilu_extract_LU_2(StringT & source, std::string const & numeric_string)
{
  source.append("__kernel void extract_LU_2( \n");
  source.append(" __global unsigned int const *A_row_indices, \n");
  source.append(" __global unsigned int const *A_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" const *A_elements, \n");
  source.append(" unsigned int A_size1, \n");
  source.append(" __global unsigned int const *L_row_indices, \n");
  source.append(" __global unsigned int *L_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" *L_elements, \n");
  source.append(" __global unsigned int const *U_row_indices, \n");
  source.append(" __global unsigned int *U_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" *U_elements) { \n");

  // each work item strides over the rows of A:
  source.append(" for (unsigned int row = get_global_id(0); \n");
  source.append(" row < A_size1; \n");
  source.append(" row += get_global_size(0)) \n");
  source.append(" { \n");
  source.append(" unsigned int row_begin = A_row_indices[row]; \n");
  source.append(" unsigned int row_end = A_row_indices[row+1]; \n");

  // independent write cursors into L and U for this row:
  source.append(" unsigned int index_L = L_row_indices[row]; \n");
  source.append(" unsigned int index_U = U_row_indices[row]; \n");
  source.append(" for (unsigned int j=row_begin; j<row_end; ++j) { \n");
  source.append(" unsigned int col = A_col_indices[j]; \n");
  source.append(" "); source.append(numeric_string); source.append(" value = A_elements[j]; \n");

  // lower part (diagonal included):
  source.append(" if (col <= row) { \n");
  source.append(" L_col_indices[index_L] = col; \n");
  source.append(" L_elements[index_L] = value; \n");
  source.append(" ++index_L; \n");
  source.append(" } \n");
  // upper part (diagonal included):
  source.append(" if (col >= row) { \n");
  source.append(" U_col_indices[index_U] = col; \n");
  source.append(" U_elements[index_U] = value; \n");
  source.append(" ++index_U; \n");
  source.append(" } \n");
  source.append(" } \n");

  source.append(" } \n");
  source.append("} \n");
}
+
+template<typename StringT>
+void generate_ilu_scale_kernel_1(StringT & source, std::string const & numeric_string)
+{
+ source.append("__kernel void ilu_scale_kernel_1( \n");
+ source.append(" __global unsigned int const *A_row_indices, \n");
+ source.append(" __global unsigned int const *A_col_indices, \n");
+ source.append(" __global "); source.append(numeric_string); source.append(" const *A_elements, \n");
+ source.append(" unsigned int A_size1, \n");
+ source.append(" __global "); source.append(numeric_string); source.append(" *D_elements) { \n");
+
+ source.append(" for (unsigned int row = get_global_id(0); \n");
+ source.append(" row < A_size1; \n");
+ source.append(" row += get_global_size(0)) \n");
+ source.append(" { \n");
+ source.append(" unsigned int row_begin = A_row_indices[row]; \n");
+ source.append(" unsigned int row_end = A_row_indices[row+1]; \n");
+
+ source.append(" for (unsigned int j=row_begin; j<row_end; ++j) { \n");
+ source.append(" unsigned int col = A_col_indices[j]; \n");
+
+ source.append(" if (col == row) { \n");
+ source.append(" D_elements[row] = 1 / sqrt(fabs(A_elements[j])); \n");
+ source.append(" break; \n");
+ source.append(" } \n");
+ source.append(" } \n");
+
+ source.append(" } \n");
+ source.append("} \n");
+}
+
+template<typename StringT>
+void generate_ilu_scale_kernel_2(StringT & source, std::string const & numeric_string)
+{
+ source.append("__kernel void ilu_scale_kernel_2( \n");
+ source.append(" __global unsigned int const *R_row_indices, \n");
+ source.append(" __global unsigned int const *R_col_indices, \n");
+ source.append(" __global "); source.append(numeric_string); source.append(" *R_elements, \n");
+ source.append(" unsigned int R_size1, \n");
+ source.append(" __global "); source.append(numeric_string); source.append(" const *D_elements) { \n");
+
+ source.append(" for (unsigned int row = get_global_id(0); \n");
+ source.append(" row < R_size1; \n");
+ source.append(" row += get_global_size(0)) \n");
+ source.append(" { \n");
+ source.append(" unsigned int row_begin = R_row_indices[row]; \n");
+ source.append(" unsigned int row_end = R_row_indices[row+1]; \n");
+
+ source.append(" "); source.append(numeric_string); source.append(" D_row = D_elements[row]; \n");
+ source.append(" for (unsigned int j=row_begin; j<row_end; ++j) \n");
+ source.append(" R_elements[j] *= D_row * D_elements[R_col_indices[j]]; \n");
+
+ source.append(" } \n");
+ source.append("} \n");
+}
+
/** @brief Generates the kernel performing one nonlinear sweep for the ILU factors L and
  *        transposed(U) (Chow/Patel-style fine-grained parallel factorization, per the
  *        kernel name -- confirm against the host-side caller).
  *
  * All reads of factor values go through the *_backup buffers (previous iterate) while
  * L_elements / U_trans_elements are written, i.e. the sweep is a Jacobi-type fixed-point
  * update. aij_L / aij_U_trans hold the entries of the system matrix A at the sparsity
  * patterns of L and transposed(U), respectively.
  */
template<typename StringT>
void generate_ilu_chow_patel_sweep_kernel(StringT & source, std::string const & numeric_string)
{
  source.append("__kernel void ilu_chow_patel_sweep_kernel( \n");
  source.append(" __global unsigned int const *L_row_indices, \n");
  source.append(" __global unsigned int const *L_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" *L_elements, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" const *L_backup, \n");
  source.append(" unsigned int L_size1, \n");

  source.append(" __global "); source.append(numeric_string); source.append(" const *aij_L, \n");

  source.append(" __global unsigned int const *U_trans_row_indices, \n");
  source.append(" __global unsigned int const *U_trans_col_indices, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" *U_trans_elements, \n");
  source.append(" __global "); source.append(numeric_string); source.append(" const *U_trans_backup, \n");

  source.append(" __global "); source.append(numeric_string); source.append(" const *aij_U_trans) { \n");

  // each work item strides over the rows:
  source.append(" for (unsigned int row = get_global_id(0); \n");
  source.append(" row < L_size1; \n");
  source.append(" row += get_global_size(0)) \n");
  source.append(" { \n");

  //
  // Update L:
  //
  source.append(" unsigned int row_L_start = L_row_indices[row]; \n");
  source.append(" unsigned int row_L_end = L_row_indices[row + 1]; \n");

  source.append(" for (unsigned int j = row_L_start; j < row_L_end; ++j) { \n");
  source.append(" unsigned int col = L_col_indices[j]; \n");

  // the (unit) diagonal of L is not updated:
  source.append(" if (col == row) continue; \n");

  source.append(" unsigned int row_U_start = U_trans_row_indices[col]; \n");
  source.append(" unsigned int row_U_end = U_trans_row_indices[col + 1]; \n");

  // merge cursor into row 'col' of transposed(U); L_size1 acts as an
  // out-of-range sentinel column for empty rows:
  source.append(" unsigned int index_U = row_U_start; \n");
  source.append(" unsigned int col_U = (index_U < row_U_end) ? U_trans_col_indices[index_U] : L_size1; \n");

  // sum = sum_k l_ik * u_kj, computed as a sparse dot product by merging the two
  // rows on their (ascending) column indices:
  source.append(" "); source.append(numeric_string); source.append(" sum = 0; \n");
  source.append(" for (unsigned int k = row_L_start; k < j; ++k) { \n");
  source.append(" unsigned int col_L = L_col_indices[k]; \n");

  source.append(" while (col_U < col_L) { \n");
  source.append(" ++index_U; \n");
  source.append(" col_U = U_trans_col_indices[index_U]; \n");
  source.append(" } \n");

  source.append(" if (col_U == col_L) \n");
  source.append(" sum += L_backup[k] * U_trans_backup[index_U]; \n");
  source.append(" } \n");

  // update l_ij = (a_ij - sum) / u_jj, where u_jj is the last stored entry of
  // row 'col' of transposed(U) (its diagonal, assuming sorted columns -- confirm):
  source.append(" L_elements[j] = (aij_L[j] - sum) / U_trans_backup[row_U_end - 1]; \n");
  source.append(" } \n");

  //
  // Update U:
  //
  source.append(" unsigned int row_U_start = U_trans_row_indices[row]; \n");
  source.append(" unsigned int row_U_end = U_trans_row_indices[row + 1]; \n");

  source.append(" for (unsigned int j = row_U_start; j < row_U_end; ++j) { \n");
  source.append(" unsigned int col = U_trans_col_indices[j]; \n");

  // reuse the L cursors for row 'col' of L:
  source.append(" row_L_start = L_row_indices[col]; \n");
  source.append(" row_L_end = L_row_indices[col + 1]; \n");

  // compute \sum_{k=1}^{j-1} l_ik u_kj (same merge scheme as above, roles swapped);
  // L_size1 again serves as the sentinel column:
  source.append(" unsigned int index_L = row_L_start; \n");
  source.append(" unsigned int col_L = (index_L < row_L_end) ? L_col_indices[index_L] : L_size1; \n");
  source.append(" "); source.append(numeric_string); source.append(" sum = 0; \n");
  source.append(" for (unsigned int k = row_U_start; k < j; ++k) { \n");
  source.append(" unsigned int col_U = U_trans_col_indices[k]; \n");

  // find element in L:
  source.append(" while (col_L < col_U) { \n");
  source.append(" ++index_L; \n");
  source.append(" col_L = L_col_indices[index_L]; \n");
  source.append(" } \n");

  source.append(" if (col_U == col_L) \n");
  source.append(" sum += L_backup[index_L] * U_trans_backup[k]; \n");
  source.append(" } \n");

  // update U_ij (no division -- L carries the unit diagonal):
  source.append(" U_trans_elements[j] = aij_U_trans[j] - sum; \n");
  source.append(" } \n");

  source.append(" } \n");
  source.append("} \n");
}
+
+
+template<typename StringT>
+void generate_ilu_form_neumann_matrix_kernel(StringT & source, std::string const & numeric_string)
+{
+ source.append("__kernel void ilu_form_neumann_matrix_kernel( \n");
+ source.append(" __global unsigned int const *R_row_indices, \n");
+ source.append(" __global unsigned int const *R_col_indices, \n");
+ source.append(" __global "); source.append(numeric_string); source.append(" *R_elements, \n");
+ source.append(" unsigned int R_size1, \n");
+ source.append(" __global "); source.append(numeric_string); source.append(" *D_elements) { \n");
+
+ source.append(" for (unsigned int row = get_global_id(0); \n");
+ source.append(" row < R_size1; \n");
+ source.append(" row += get_global_size(0)) \n");
+ source.append(" { \n");
+ source.append(" unsigned int row_begin = R_row_indices[row]; \n");
+ source.append(" unsigned int row_end = R_row_indices[row+1]; \n");
+
+ // Part 1: Extract and set diagonal entry
+ source.append(" "); source.append(numeric_string); source.append(" diag = D_elements[row]; \n");
+ source.append(" for (unsigned int j=row_begin; j<row_end; ++j) { \n");
+ source.append(" unsigned int col = R_col_indices[j]; \n");
+ source.append(" if (col == row) { \n");
+ source.append(" diag = R_elements[j]; \n");
+ source.append(" R_elements[j] = 0; \n");
+ source.append(" break; \n");
+ source.append(" } \n");
+ source.append(" } \n");
+ source.append(" D_elements[row] = diag; \n");
+
+ // Part 2: Scale
+ source.append(" for (unsigned int j=row_begin; j<row_end; ++j) \n");
+ source.append(" R_elements[j] /= -diag; \n");
+
+ source.append(" } \n");
+ source.append("} \n");
+}
+
+
+
// main kernel class
/** @brief Main kernel class for generating OpenCL kernels for incomplete LU factorization preconditioners. */
template<class NumericT>
struct ilu
{
  // Name of the OpenCL program holding all ILU/ICC kernels, e.g. "float_ilu".
  static std::string program_name()
  {
    return viennacl::ocl::type_to_string<NumericT>::apply() + "_ilu";
  }

  // Builds and registers the OpenCL program for 'ctx'. Safe to call repeatedly:
  // a static per-context flag ensures the program is compiled only once per context.
  static void init(viennacl::ocl::context & ctx)
  {
    static std::map<cl_context, bool> init_done;
    if (!init_done[ctx.handle().get()])
    {
      // checks NumericT == double against device capabilities
      // (behavior defined in viennacl/ocl/utils.hpp -- presumably rejects
      //  double on devices without support; confirm there):
      viennacl::ocl::DOUBLE_PRECISION_CHECKER<NumericT>::apply(ctx);
      std::string numeric_string = viennacl::ocl::type_to_string<NumericT>::apply();

      std::string source;
      source.reserve(1024);

      viennacl::ocl::append_double_precision_pragma<NumericT>(ctx, source);

      // only generate for floating points (forces error for integers)
      if (numeric_string == "float" || numeric_string == "double")
      {
        generate_ilu_level_scheduling_substitute(source, numeric_string);

        // incomplete Cholesky (ICC) kernels:
        generate_icc_extract_L_1(source);
        generate_icc_extract_L_2(source, numeric_string);
        generate_icc_chow_patel_sweep_kernel(source, numeric_string);

        // incomplete LU (ILU) kernels:
        generate_ilu_extract_LU_1(source);
        generate_ilu_extract_LU_2(source, numeric_string);
        generate_ilu_scale_kernel_1(source, numeric_string);
        generate_ilu_scale_kernel_2(source, numeric_string);
        generate_ilu_chow_patel_sweep_kernel(source, numeric_string);
        generate_ilu_form_neumann_matrix_kernel(source, numeric_string);
      }
      // for integral NumericT the program contains no kernels, so later kernel
      // lookups fail -- the intended "forces error for integers" behavior.

      std::string prog_name = program_name();
      #ifdef VIENNACL_BUILD_INFO
      std::cout << "Creating program " << prog_name << std::endl;
      #endif
      ctx.add_program(source, prog_name);
      init_done[ctx.handle().get()] = true;
    } //if
  } //init
};
+
+} // namespace kernels
+} // namespace opencl
+} // namespace linalg
+} // namespace viennacl
+#endif
+