#ifndef VIENNACL_SLICED_ELL_MATRIX_HPP_
#define VIENNACL_SLICED_ELL_MATRIX_HPP_
template<typename ScalarT, typename IndexT>
class sliced_ell_matrix
rows_per_block_(num_rows_per_block_) {}
#ifdef VIENNACL_WITH_OPENCL
columns_per_block_.opencl_handle().context(ctx.opencl_context());
column_indices_.opencl_handle().context(ctx.opencl_context());
block_start_.opencl_handle().context(ctx.opencl_context());
elements_.opencl_handle().context(ctx.opencl_context());
std::vector<ScalarT> host_elements(1);
#if defined(_MSC_VER) && _MSC_VER < 1500      //Visual Studio 2005 needs special treatment
template<typename CPUMatrixT>
template<typename CPUMatrixT, typename ScalarT2, typename IndexT2>
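// Usage note (not part of the original header): the friend declarations above expose the
// copy() conversion routines implemented further below. A minimal construction sketch,
// with purely illustrative sizes and a block size of 64 rows; the default constructor and
// default index type are assumed here:
//
//   viennacl::sliced_ell_matrix<double> A;                       // sizes set later by copy()
//   viennacl::sliced_ell_matrix<double> B(1000, 1000, 64);       // explicit rows per block
//   viennacl::sliced_ell_matrix<double> C(viennacl::context());  // bind to a specific context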
template<typename CPUMatrixT, typename ScalarT, typename IndexT>
assert( (gpu_matrix.size1() == 0 || viennacl::traits::size1(cpu_matrix) == gpu_matrix.size1()) && bool("Size mismatch") );
assert( (gpu_matrix.size2() == 0 || viennacl::traits::size2(cpu_matrix) == gpu_matrix.size2()) && bool("Size mismatch") );
if (gpu_matrix.rows_per_block() == 0)
gpu_matrix.rows_per_block_ = 128;
gpu_matrix.rows_per_block_ = 256;
#ifdef VIENNACL_WITH_OPENCL
gpu_matrix.rows_per_block_ = 256;
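// Note (not part of the original source): if rows_per_block_ was left at zero, copy() falls
// back to a backend-dependent default (128 or 256 rows per block, as seen above). Passing
// num_rows_per_block_ to the constructor, e.g. sliced_ell_matrix<double>(m, n, 64), overrides
// this heuristic (the value 64 is illustrative only).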
IndexT columns_in_current_block = 0;
for (typename CPUMatrixT::const_iterator1 row_it = cpu_matrix.begin1(); row_it != cpu_matrix.end1(); ++row_it)
++row_counter_in_current_block;
for (typename CPUMatrixT::const_iterator2 col_it = row_it.begin(); col_it != row_it.end(); ++col_it)
columns_in_current_block = std::max(columns_in_current_block, static_cast<IndexT>(entries_in_row));
if ( (row_it.index1() % gpu_matrix.rows_per_block() == gpu_matrix.rows_per_block() - 1)
total_element_buffer_size += columns_in_current_block * gpu_matrix.rows_per_block();
columns_in_block_buffer.set(row_it.index1() / gpu_matrix.rows_per_block(), columns_in_current_block);
columns_in_current_block = 0;
gpu_matrix.rows_ = cpu_matrix.size1();
gpu_matrix.cols_ = cpu_matrix.size2();
std::vector<ScalarT> elements(total_element_buffer_size, 0);
for (typename CPUMatrixT::const_iterator1 row_it = cpu_matrix.begin1(); row_it != cpu_matrix.end1(); ++row_it)
for (typename CPUMatrixT::const_iterator2 col_it = row_it.begin(); col_it != row_it.end(); ++col_it)
vcl_size_t buffer_index = block_offset + entry_in_row * gpu_matrix.rows_per_block() + row_in_block;
coords.set(buffer_index, col_it.index2());
elements[buffer_index] = *col_it;
if ( (row_it.index1() % gpu_matrix.rows_per_block() == gpu_matrix.rows_per_block() - 1)
block_start.set(block_index, static_cast<IndexT>(block_offset));
block_offset += columns_in_block_buffer[block_index] * gpu_matrix.rows_per_block();
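// Layout illustration (comment added, not in the original source): within a block, entries
// are stored column-major, so the k-th nonzero of every row in the block lies contiguously
// in memory. With rows_per_block = 4 and block_offset = 0, the entry with row_in_block = 2
// and entry_in_row = 3 lands at buffer_index = 0 + 3*4 + 2 = 14. Rows shorter than the widest
// row of their block are padded with zeros up to columns_in_block_buffer[block] entries,
// which is exactly what total_element_buffer_size accounted for in the first pass above.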
template<typename IndexT, typename NumericT, typename IndexT2>
void copy(std::vector< std::map<IndexT, NumericT> > const & cpu_matrix, sliced_ell_matrix<NumericT, IndexT2> & gpu_matrix)
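// Usage sketch for this overload (illustrative sizes and values, not part of the header):
//
//   std::vector< std::map<unsigned int, double> > host_A(5);
//   host_A[0][0] =  4.0;  host_A[0][1] = -1.0;
//   host_A[4][3] = -1.0;  host_A[4][4] =  4.0;   // ... remaining rows filled analogously
//
//   viennacl::sliced_ell_matrix<double> A;
//   viennacl::copy(host_A, A);   // pads each block to its widest row and transfers to the device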
template<typename ScalarT, typename IndexT>
struct op_executor<vector_base<ScalarT>, op_assign, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_base<ScalarT>, op_prod> >
static void apply(vector_base<ScalarT> & lhs, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_base<ScalarT>, op_prod> const & rhs)
template<typename ScalarT, typename IndexT>
struct op_executor<vector_base<ScalarT>, op_inplace_add, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_base<ScalarT>, op_prod> >
static void apply(vector_base<ScalarT> & lhs, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_base<ScalarT>, op_prod> const & rhs)
template<typename ScalarT, typename IndexT>
struct op_executor<vector_base<ScalarT>, op_inplace_sub, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_base<ScalarT>, op_prod> >
static void apply(vector_base<ScalarT> & lhs, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_base<ScalarT>, op_prod> const & rhs)
template<typename ScalarT, typename IndexT, typename LHS, typename RHS, typename OP>
struct op_executor<vector_base<ScalarT>, op_assign, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
static void apply(vector_base<ScalarT> & lhs, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
template<typename ScalarT, typename IndexT, typename LHS, typename RHS, typename OP>
struct op_executor<vector_base<ScalarT>, op_inplace_add, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
static void apply(vector_base<ScalarT> & lhs, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
template<typename ScalarT, typename IndexT, typename LHS, typename RHS, typename OP>
struct op_executor<vector_base<ScalarT>, op_inplace_sub, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
static void apply(vector_base<ScalarT> & lhs, vector_expression<const sliced_ell_matrix<ScalarT, IndexT>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
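// Usage note (not part of the original header): the op_executor specializations above
// dispatch sparse matrix-vector products for assignment, in-place addition, and in-place
// subtraction. In user code this typically looks like the following sketch, assuming A was
// set up as in the earlier examples (names are illustrative):
//
//   #include "viennacl/linalg/prod.hpp"
//
//   viennacl::vector<double> x(A.size2()), y(A.size1());
//   y  = viennacl::linalg::prod(A, x);   // handled by the op_assign specialization
//   y += viennacl::linalg::prod(A, x);   // handled by the op_inplace_add specialization
//   y -= viennacl::linalg::prod(A, x);   // handled by the op_inplace_sub specialization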