ViennaCL - The Vienna Computing Library  1.6.2
Free open-source GPU-accelerated linear algebra and solver library.
amg_base.hpp
1 #ifndef VIENNACL_LINALG_DETAIL_AMG_AMG_BASE_HPP_
2 #define VIENNACL_LINALG_DETAIL_AMG_AMG_BASE_HPP_
3 
4 /* =========================================================================
5  Copyright (c) 2010-2014, Institute for Microelectronics,
6  Institute for Analysis and Scientific Computing,
7  TU Wien.
8  Portions of this software are copyright by UChicago Argonne, LLC.
9 
10  -----------------
11  ViennaCL - The Vienna Computing Library
12  -----------------
13 
14  Project Head: Karl Rupp rupp@iue.tuwien.ac.at
15 
16  (A list of authors and contributors can be found in the PDF manual)
17 
18  License: MIT (X11), see file LICENSE in the base directory
19 ============================================================================= */
20 
27 #include <boost/numeric/ublas/operation.hpp>
28 #include <boost/numeric/ublas/vector.hpp>
29 #include <cmath>
30 #include <set>
31 #include <list>
32 #include <algorithm>
33 
34 #include <map>
35 #ifdef VIENNACL_WITH_OPENMP
36 #include <omp.h>
37 #endif
38 
39 #include "amg_debug.hpp"
40 
41 #define VIENNACL_AMG_COARSE_RS 1
42 #define VIENNACL_AMG_COARSE_ONEPASS 2
43 #define VIENNACL_AMG_COARSE_RS0 3
44 #define VIENNACL_AMG_COARSE_RS3 4
45 #define VIENNACL_AMG_COARSE_AG 5
46 #define VIENNACL_AMG_INTERPOL_DIRECT 1
47 #define VIENNACL_AMG_INTERPOL_CLASSIC 2
48 #define VIENNACL_AMG_INTERPOL_AG 3
49 #define VIENNACL_AMG_INTERPOL_SA 4
50 
51 namespace viennacl
52 {
53 namespace linalg
54 {
55 namespace detail
56 {
57 namespace amg
58 {
61 class amg_tag
62 {
63 public:
76  amg_tag(unsigned int coarse = 1,
77  unsigned int interpol = 1,
78  double threshold = 0.25,
79  double interpolweight = 0.2,
80  double jacobiweight = 1,
81  unsigned int presmooth = 1,
82  unsigned int postsmooth = 1,
83  unsigned int coarselevels = 0)
84  : coarse_(coarse), interpol_(interpol),
85  threshold_(threshold), interpolweight_(interpolweight), jacobiweight_(jacobiweight),
86  presmooth_(presmooth), postsmooth_(postsmooth), coarselevels_(coarselevels) {}
87 
88  // Getter-/Setter-Functions
89  void set_coarse(unsigned int coarse) { coarse_ = coarse; }
90  unsigned int get_coarse() const { return coarse_; }
91 
92  void set_interpol(unsigned int interpol) { interpol_ = interpol; }
93  unsigned int get_interpol() const { return interpol_; }
94 
95  void set_threshold(double threshold) { if (threshold > 0 && threshold <= 1) threshold_ = threshold; }
96  double get_threshold() const { return threshold_; }
97 
98  void set_as(double jacobiweight) { if (jacobiweight > 0 && jacobiweight <= 2) jacobiweight_ = jacobiweight; }
99  double get_interpolweight() const { return interpolweight_; }
100 
101  void set_interpolweight(double interpolweight) { if (interpolweight > 0 && interpolweight <= 2) interpolweight_ = interpolweight; }
102  double get_jacobiweight() const { return jacobiweight_; }
103 
104  void set_presmooth(unsigned int presmooth) { presmooth_ = presmooth; }
105  unsigned int get_presmooth() const { return presmooth_; }
106 
107  void set_postsmooth(unsigned int postsmooth) { postsmooth_ = postsmooth; }
108  unsigned int get_postsmooth() const { return postsmooth_; }
109 
110  void set_coarselevels(unsigned int coarselevels) { coarselevels_ = coarselevels; }
111  unsigned int get_coarselevels() const { return coarselevels_; }
112 
113 private:
114  unsigned int coarse_, interpol_;
115  double threshold_, interpolweight_, jacobiweight_;
116  unsigned int presmooth_, postsmooth_, coarselevels_;
117 };
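Illustrative sketch (not part of the header): one way such a tag might be filled, using the strategy macros defined at the top of this file. The include path and the helper function name are assumptions; the numeric arguments simply repeat the constructor defaults shown above.

    #include "viennacl/linalg/detail/amg/amg_base.hpp"   // assumed install path, matching the include guard above

    void configure_tag()
    {
      viennacl::linalg::detail::amg::amg_tag tag(VIENNACL_AMG_COARSE_RS,       // coarsening strategy
                                                 VIENNACL_AMG_INTERPOL_DIRECT, // interpolation strategy
                                                 0.25,                         // strength-of-connection threshold
                                                 0.2,                          // interpolation weight
                                                 1.0,                          // Jacobi smoother weight
                                                 1, 1,                         // pre-/post-smoothing sweeps
                                                 0);                           // number of coarse levels
      tag.set_coarselevels(3);                   // settings can also be adjusted afterwards via the setters
    }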
118 
123 template<typename InternalT, typename IteratorT, typename NumericT>
124 class amg_nonzero_scalar
125 {
126 private:
127  InternalT *m_;
128  IteratorT iter_;
129  unsigned int i_,j_;
130  NumericT s_;
131 
132  template <typename T>
133  bool is_zero(T value) const { return value <= 0 && value >= 0; }
134 
135  template <typename T>
136  bool is_zero(T * value) const { return value == NULL; }
137 
138 public:
140 
148  amg_nonzero_scalar(InternalT *m,
149  IteratorT & iter,
150  unsigned int i,
151  unsigned int j,
152  NumericT s = 0): m_(m), iter_(iter), i_(i), j_(j), s_(s) {}
153 
157  NumericT operator=(const NumericT value)
158  {
159  s_ = value;
160  // Only write if scalar is nonzero
161  if (is_zero(s_)) return s_;
162  // Write to m_ using iterator iter_ or indices (i_,j_)
163  m_->addscalar(iter_,i_,j_,s_);
164  return s_;
165  }
166 
170  NumericT operator+=(const NumericT value)
171  {
172  // If zero is added, then no change necessary
173  if (is_zero(value))
174  return s_;
175 
176  s_ += value;
177  // Remove entry if resulting scalar is zero
178  if (is_zero(s_))
179  {
180  m_->removescalar(iter_,i_);
181  return s_;
182  }
183  //Write to m_ using iterator iter_ or indices (i_,j_)
184  m_->addscalar(iter_,i_,j_,s_);
185  return s_;
186  }
187  NumericT operator++(int)
188  {
189  s_++;
190  if (is_zero(s_))
191  m_->removescalar(iter_,i_);
192  m_->addscalar (iter_,i_,j_,s_);
193  return s_;
194  }
195  NumericT operator++()
196  {
197  s_++;
198  if (is_zero(s_))
199  m_->removescalar(iter_,i_);
200  m_->addscalar(iter_,i_,j_,s_);
201  return s_;
202  }
203  operator NumericT(void) { return s_; }
204 };
205 
208 template<typename InternalT>
209 class amg_sparsevector_iterator
210 {
211 private:
212  typedef amg_sparsevector_iterator<InternalT> self_type;
213  typedef typename InternalT::mapped_type ScalarType;
214 
215  InternalT & internal_vec_;
216  typename InternalT::iterator iter_;
217 
218 public:
219 
224  amg_sparsevector_iterator(InternalT & vec, bool begin=true): internal_vec_(vec)
225  {
226  if (begin)
227  iter_ = internal_vec_.begin();
228  else
229  iter_ = internal_vec_.end();
230  }
231 
232  bool operator == (self_type const & other) const
233  {
234  if (iter_ == other.iter_)
235  return true;
236  else
237  return false;
238  }
239  bool operator != (self_type const & other) const
240  {
241  if (iter_ != other.iter_)
242  return true;
243  else
244  return false;
245  }
246 
247  self_type const & operator ++ () const { iter_++; return *this; }
248  self_type & operator ++ () { iter_++; return *this; }
249  self_type const & operator -- () const { iter_--; return *this; }
250  self_type & operator -- () { iter_--; return *this; }
251  ScalarType const & operator * () const { return (*iter_).second; }
252  ScalarType & operator * () { return (*iter_).second; }
253  unsigned int index() const { return (*iter_).first; }
254  unsigned int index() { return (*iter_).first; }
255 };
256 
259 template<typename NumericT>
260 class amg_sparsevector
261 {
262 public:
263  typedef NumericT value_type;
264 
265 private:
266  // A map is used internally which saves all non-zero elements with pairs of (index,value)
267  typedef std::map<unsigned int, NumericT> InternalType;
268  typedef amg_nonzero_scalar<amg_sparsevector<NumericT>, typename InternalType::iterator, NumericT> NonzeroScalarType;
270 
271  // Size is only a dummy variable. Not needed for internal map structure but for compatible vector interface.
272  unsigned int size_;
273  InternalType internal_vector_;
274 
275  template <typename T>
276  bool is_zero(T value) const { return value <= 0 && value >= 0; }
277 
278  template <typename T>
279  bool is_zero(T * value) const { return value == NULL; }
280 
281 public:
282  typedef amg_sparsevector_iterator<InternalType> iterator;
283  typedef typename InternalType::const_iterator const_iterator;
284 
285 public:
289  amg_sparsevector(unsigned int size = 0): size_(size) {}
290 
291  void resize(unsigned int size) { size_ = size; }
292  unsigned int size() const { return size_;}
293 
294  // Returns number of non-zero entries in vector equal to the size of the underlying map.
295  unsigned int internal_size() const { return static_cast<unsigned int>(internal_vector_.size()); }
296  // Delete underlying map.
297  void clear() { internal_vector_.clear(); }
298  // Remove entry at position i.
299  void remove(unsigned int i) { internal_vector_.erase(i); }
300 
301  // Add s to the entry at position i
302  void add(unsigned int i, NumericT s)
303  {
304  typename InternalType::iterator iter = internal_vector_.find(i);
305  // If there is no entry at position i, add new entry at that position
306  if (iter == internal_vector_.end())
307  addscalar(iter,i,i,s);
308  else
309  {
310  s += (*iter).second;
311  // If new value is zero, then erase the entry, otherwise write new value
312  if (s < 0 || s > 0)
313  (*iter).second = s;
314  else
315  internal_vector_.erase(iter);
316  }
317  }
318 
319  // Write to the map. Is called from non-zero scalar type.
320  template<typename IteratorT>
321  void addscalar(IteratorT & iter, unsigned int i, unsigned int /* j */, NumericT s)
322  {
323  // Don't write if value is zero
324  if (is_zero(s))
325  return;
326 
327  // If entry is already present, overwrite value, otherwise make new entry
328  if (iter != internal_vector_.end())
329  (*iter).second = s;
330  else
331  internal_vector_[i] = s;
332  }
333 
334  // Remove value from the map. Is called from non-zero scalar type.
335  template<typename IteratorT>
336  void removescalar(IteratorT & iter, unsigned int /* i */) { internal_vector_.erase(iter); }
337 
338  // Bracket operator. Returns the non-zero scalar proxy holding the actual value of the respective entry; the proxy calls addscalar()/removescalar() after the value is altered.
339  NonzeroScalarType operator [] (unsigned int i)
340  {
341  typename InternalType::iterator it = internal_vector_.find(i);
342  // If value is already present then build non-zero scalar with actual value, otherwise 0.
343  if (it != internal_vector_.end())
344  return NonzeroScalarType(this,it,i,i,(*it).second);
345  else
346  return NonzeroScalarType(this,it,i,i,0);
347  }
348 
349  // Use internal data structure directly for read-only access. No need to use non-zero scalar as no write access possible.
350  NumericT operator [] (unsigned int i) const
351  {
352  const_iterator it = internal_vector_.find(i);
353 
354  if (it != internal_vector_.end())
355  return (*it).second;
356  else
357  return 0;
358  }
359 
360  // Iterator functions.
361  iterator begin() { return iterator(internal_vector_); }
362  const_iterator begin() const { return internal_vector_.begin(); }
363  iterator end() { return iterator(internal_vector_, false); }
364  const_iterator end() const { return internal_vector_.end(); }
365 
366  // checks whether value at index i is nonzero. More efficient than doing [] == 0.
367  bool isnonzero(unsigned int i) const { return internal_vector_.find(i) != internal_vector_.end(); }
368 
369  // Copies data into a ublas vector type.
370  operator boost::numeric::ublas::vector<NumericT>(void)
371  {
372  boost::numeric::ublas::vector<NumericT> vec (size_);
373  for (iterator iter = begin(); iter != end(); ++iter)
374  vec [iter.index()] = *iter;
375  return vec;
376  }
377 };
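Illustrative sketch (not part of the header; assumes this file and <iostream> are included, and the function name is made up) of how amg_sparsevector behaves: writes go through the non-zero scalar proxy, so zeros are never stored and entries that become zero are erased again.

    void sparsevector_demo()
    {
      using viennacl::linalg::detail::amg::amg_sparsevector;

      amg_sparsevector<double> v(10);   // logical size 10, no entries stored yet
      v.add(3, 1.5);                    // accumulate into entry 3
      v[7] = 2.0;                       // proxy write creates entry 7
      v[7] += -2.0;                     // entry 7 becomes zero and is erased again

      for (amg_sparsevector<double>::iterator it = v.begin(); it != v.end(); ++it)
        std::cout << it.index() << " -> " << *it << std::endl;   // visits stored entries only

      boost::numeric::ublas::vector<double> dense = v;   // conversion operator copies into a dense ublas vector
    }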
378 
384 template<typename NumericT>
385 class amg_sparsematrix
386 {
387 public:
388  typedef NumericT value_type;
389 private:
390  typedef std::map<unsigned int,NumericT> RowType;
391  typedef std::vector<RowType> InternalType;
393 
394  // Adapter is used for certain functionality, especially iterators.
395  typedef viennacl::tools::sparse_matrix_adapter<NumericT> AdapterType;
396  typedef viennacl::tools::const_sparse_matrix_adapter<NumericT> ConstAdapterType;
397 
398  // Non-zero scalar is used to write to the matrix.
399  typedef amg_nonzero_scalar<amg_sparsematrix<NumericT>, typename RowType::iterator, NumericT> NonzeroScalarType;
400 
401  // Holds matrix coefficients.
402  InternalType internal_mat_;
403  // Holds matrix coefficient of transposed matrix if built.
404  // Note: Only internal_mat is written using operators and methods while internal_mat_trans is built from internal_mat using do_trans().
405  InternalType internal_mat_trans_;
406  // Saves sizes.
407  vcl_size_t s1_, s2_;
408 
409  // True if the transposed of the matrix is used (for calculations, iteration, etc.).
410  bool transposed_mode_;
411  // True if the transposed is already built (saved in internal_mat_trans) and also up to date (no changes to internal_mat).
412  bool transposed_;
413 
414 public:
415  typedef typename AdapterType::iterator1 iterator1;
416  typedef typename AdapterType::iterator2 iterator2;
417  typedef typename ConstAdapterType::const_iterator1 const_iterator1;
418  typedef typename ConstAdapterType::const_iterator2 const_iterator2;
419 
421  amg_sparsematrix()
422  {
423  transposed_mode_ = false;
424  transposed_ = false;
425  }
426 
431  amg_sparsematrix(unsigned int i, unsigned int j)
432  {
433  AdapterType a(internal_mat_, i, j);
434  a.resize(i,j,false);
435  AdapterType a_trans(internal_mat_trans_, j, i);
436  a_trans.resize(j,i,false);
437  s1_ = i;
438  s2_ = j;
439  a.clear();
440  a_trans.clear();
441  transposed_mode_ = false;
442  transposed_ = false;
443  }
444 
449  amg_sparsematrix(std::vector<std::map<unsigned int, NumericT> > const & mat)
450  {
451  AdapterType a (internal_mat_, mat.size(), mat.size());
452  AdapterType a_trans (internal_mat_trans_, mat.size(), mat.size());
453  a.resize(mat.size(), mat.size());
454  a_trans.resize(mat.size(), mat.size());
455 
456  internal_mat_ = mat;
457  s1_ = s2_ = mat.size();
458 
459  transposed_mode_ = false;
460  transposed_ = false;
461  }
462 
467  template<typename MatrixT>
468  amg_sparsematrix(MatrixT const & mat)
469  {
470  AdapterType a(internal_mat_, mat.size1(), mat.size2());
471  AdapterType a_trans(internal_mat_trans_, mat.size2(), mat.size1());
472  a.resize(mat.size1(), mat.size2());
473  a_trans.resize(mat.size2(), mat.size1());
474  s1_ = mat.size1();
475  s2_ = mat.size2();
476  a.clear();
477  a_trans.clear();
478 
479  for (typename MatrixT::const_iterator1 row_iter = mat.begin1(); row_iter != mat.end1(); ++row_iter)
480  {
481  for (typename MatrixT::const_iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
482  {
483  if (std::fabs(*col_iter) > 0) // *col_iter != 0, but without floating point comparison warnings
484  {
485  unsigned int x = static_cast<unsigned int>(col_iter.index1());
486  unsigned int y = static_cast<unsigned int>(col_iter.index2());
487  a(x,y) = *col_iter;
488  a_trans(y,x) = *col_iter;
489  }
490  }
491  }
492  transposed_mode_ = false;
493  transposed_ = true;
494  }
495 
496  // Build transposed of the current matrix.
497  void do_trans()
498  {
499  // Do it only once if called in a parallel section
500  #ifdef VIENNACL_WITH_OPENMP
501  #pragma omp critical
502  #endif
503  {
504  // Only build transposed if it is not built or not up to date
505  if (!transposed_)
506  {
507  // Mode has to be set to standard mode temporarily
508  bool save_mode = transposed_mode_;
509  transposed_mode_ = false;
510 
511  for (iterator1 row_iter = begin1(); row_iter != end1(); ++row_iter)
512  for (iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
513  internal_mat_trans_[col_iter.index2()][static_cast<unsigned int>(col_iter.index1())] = *col_iter;
514 
515  transposed_mode_ = save_mode;
516  transposed_ = true;
517  }
518  }
519  } //do_trans()
520 
521  // Set transposed mode (true=transposed, false=regular)
522  void set_trans(bool mode)
523  {
524  transposed_mode_ = mode;
525  if (mode)
526  do_trans();
527  }
528 
529  bool get_trans() const { return transposed_mode_; }
530 
531  // Checks whether coefficient (i,j) is non-zero. More efficient than using (i,j) == 0.
532  bool isnonzero (unsigned int i, unsigned int j) const
533  {
534  if (!transposed_mode_)
535  {
536  if (internal_mat_[i].find(j) != internal_mat_[i].end())
537  return true;
538  else
539  return false;
540  }
541  else
542  {
543  if (internal_mat_[j].find(i) != internal_mat_[j].end())
544  return true;
545  else
546  return false;
547  }
548  } //isnonzero()
549 
550  // Add s to value at (i,j)
551  void add(unsigned int i, unsigned int j, NumericT s)
552  {
553  // If zero is added then do nothing.
554  if (s <= 0 && s >= 0)
555  return;
556 
557  typename RowType::iterator col_iter = internal_mat_[i].find(j);
558  // If there is no entry at position (i,j), then make new entry.
559  if (col_iter == internal_mat_[i].end())
560  addscalar(col_iter,i,j,s);
561  else
562  {
563  s += (*col_iter).second;
564  // Update value and erase entry if value is zero.
565  if (s < 0 || s > 0)
566  (*col_iter).second = s;
567  else
568  internal_mat_[i].erase(col_iter);
569  }
570  transposed_ = false;
571  } //add()
572 
573  // Write to the internal data structure. Is called from non-zero scalar type.
574  template<typename IteratorT>
575  void addscalar(IteratorT & iter, unsigned int i, unsigned int j, NumericT s)
576  {
577  // Don't write if value is zero
578  if (s >= 0 && s <= 0)
579  return;
580 
581  if (iter != internal_mat_[i].end())
582  (*iter).second = s;
583  else
584  internal_mat_[i][j] = s;
585 
586  transposed_ = false;
587  }
588 
589  // Remove entry from internal data structure. Is called from non-zero scalar type.
590  template<typename IteratorT>
591  void removescalar(IteratorT & iter, unsigned int i)
592  {
593  internal_mat_[i].erase(iter);
594  transposed_ = false;
595  }
596 
597  // Return non-zero scalar at position (i,j). Value is written to the non-zero scalar and updated via addscalar()/removescalar().
598  NonzeroScalarType operator()(unsigned int i, unsigned int j)
599  {
600  typename RowType::iterator iter;
601 
602  if (!transposed_mode_)
603  {
604  iter = internal_mat_[i].find(j);
605  if (iter != internal_mat_[i].end())
606  return NonzeroScalarType(this,iter,i,j,(*iter).second);
607  else
608  return NonzeroScalarType(this,iter,i,j,0);
609  }
610  else
611  {
612  iter = internal_mat_[j].find(i);
613  if (iter != internal_mat_[j].end())
614  return NonzeroScalarType(this,iter,j,i,(*iter).second);
615  else
616  return NonzeroScalarType(this,iter,j,i,0);
617  }
618  }
619 
620  // For read-only access return the actual value directly. Non-zero datatype not needed as no write access possible.
621  NumericT operator()(unsigned int i, unsigned int j) const
622  {
623  typename RowType::const_iterator iter;
624 
625  if (!transposed_mode_)
626  {
627  iter = internal_mat_[i].find(j);
628  if (iter != internal_mat_[i].end())
629  return (*iter).second;
630  else
631  return 0;
632  }
633  else
634  {
635  iter = internal_mat_[j].find(i);
636  if (iter != internal_mat_[j].end())
637  return (*iter).second;
638  else
639  return 0;
640  }
641  }
642 
643  void resize(unsigned int i, unsigned int j, bool preserve = true)
644  {
645  AdapterType a (internal_mat_);
646  a.resize(i,j,preserve);
647  AdapterType a_trans (internal_mat_trans_);
648  a_trans.resize(j,i,preserve);
649  s1_ = i;
650  s2_ = j;
651  }
652 
653  void clear()
654  {
655  AdapterType a(internal_mat_, s1_, s2_);
656  a.clear();
657  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
658  a_trans.clear();
659  transposed_ = true;
660  }
661 
662  vcl_size_t size1()
663  {
664  if (!transposed_mode_)
665  return s1_;
666  else
667  return s2_;
668  }
669 
670  vcl_size_t size1() const
671  {
672  if (!transposed_mode_)
673  return s1_;
674  else
675  return s2_;
676  }
677 
678 
679  vcl_size_t size2()
680  {
681  if (!transposed_mode_)
682  return s2_;
683  else
684  return s1_;
685  }
686 
687  vcl_size_t size2() const
688  {
689  if (!transposed_mode_)
690  return s2_;
691  else
692  return s1_;
693  }
694 
695  iterator1 begin1(bool trans = false)
696  {
697  if (!trans && !transposed_mode_)
698  {
699  AdapterType a(internal_mat_, s1_, s2_);
700  return a.begin1();
701  }
702  else
703  {
704  do_trans();
705  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
706  return a_trans.begin1();
707  }
708  }
709 
710  iterator1 end1(bool trans = false)
711  {
712  if (!trans && !transposed_mode_)
713  {
714  AdapterType a(internal_mat_, s1_, s2_);
715  return a.end1();
716  }
717  else
718  {
719  //do_trans();
720  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
721  return a_trans.end1();
722  }
723  }
724 
725  iterator2 begin2(bool trans = false)
726  {
727  if (!trans && !transposed_mode_)
728  {
729  AdapterType a(internal_mat_, s1_, s2_);
730  return a.begin2();
731  }
732  else
733  {
734  do_trans();
735  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
736  return a_trans.begin2();
737  }
738  }
739 
740  iterator2 end2(bool trans = false)
741  {
742  if (!trans && !transposed_mode_)
743  {
744  AdapterType a(internal_mat_, s1_, s2_);
745  return a.end2();
746  }
747  else
748  {
749  //do_trans();
750  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
751  return a_trans.end2();
752  }
753  }
754 
755  const_iterator1 begin1(bool trans = false) const
756  {
757  // Const_iterator of transposed can only be used if transposed matrix is already built and up to date.
758  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
759  ConstAdapterType a_const(internal_mat_, s1_, s2_);
760  return a_const.begin1();
761  }
762 
763  const_iterator1 end1(bool trans = false) const
764  {
765  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
766  ConstAdapterType a_const(internal_mat_, trans ? s2_ : s1_, trans ? s1_ : s2_);
767  return a_const.end1();
768  }
769 
770  const_iterator2 begin2(bool trans = false) const
771  {
772  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
773  ConstAdapterType a_const(internal_mat_, trans ? s2_ : s1_, trans ? s1_ : s2_);
774  return a_const.begin2();
775  }
776 
777  const_iterator2 end2(bool trans = false) const
778  {
779  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
780  ConstAdapterType a_const(internal_mat_, trans ? s2_ : s1_, trans ? s1_ : s2_);
781  return a_const.end2();
782  }
783 
784  // Returns pointer to the internal data structure. Improves performance of copy operation to GPU.
785  std::vector<std::map<unsigned int, NumericT> > * get_internal_pointer()
786  {
787  if (!transposed_mode_)
788  return &internal_mat_;
789 
790  if (!transposed_)
791  do_trans();
792  return &internal_mat_trans_;
793  }
794 
795  operator boost::numeric::ublas::compressed_matrix<NumericT>(void)
796  {
797  boost::numeric::ublas::compressed_matrix<NumericT> mat;
798  mat.resize(size1(), size2(), false);
799  mat.clear();
800 
801  for (iterator1 row_iter = begin1(); row_iter != end1(); ++row_iter)
802  for (iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
803  mat(col_iter.index1(), col_iter.index2()) = *col_iter;
804 
805  return mat;
806  }
807 
808  operator boost::numeric::ublas::matrix<NumericT>(void)
809  {
810  boost::numeric::ublas::matrix<NumericT> mat;
811  mat.resize(size1(), size2(), false);
812  mat.clear();
813 
814  for (iterator1 row_iter = begin1(); row_iter != end1(); ++row_iter)
815  for (iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
816  mat(col_iter.index1(), col_iter.index2()) = *col_iter;
817 
818  return mat;
819  }
820 };
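A usage sketch for amg_sparsematrix (illustration only; assumes this header and Boost.uBLAS are available, and the function name is made up). Entries are written either through the proxy returned by operator() or accumulated with add(); set_trans() switches all accesses to the transposed operator.

    void sparsematrix_demo()
    {
      using viennacl::linalg::detail::amg::amg_sparsematrix;

      std::vector<std::map<unsigned int, double> > stl_mat(3);
      stl_mat[0][0] = 4.0;  stl_mat[0][1] = -1.0;
      stl_mat[1][0] = -1.0; stl_mat[1][1] =  4.0; stl_mat[1][2] = -1.0;
      stl_mat[2][1] = -1.0; stl_mat[2][2] =  4.0;

      amg_sparsematrix<double> A(stl_mat);   // 3x3, copies the STL data
      A.add(0, 2, -0.5);                     // accumulate into (0,2), creating the entry
      double a01 = A(0, 1);                  // proxy converts to the stored value (-1.0)

      A.set_trans(true);                     // work on A^T from now on (built once by do_trans())
      bool nz = A.isnonzero(2, 0);           // true: A^T(2,0) corresponds to the entry A(0,2) added above
      A.set_trans(false);

      boost::numeric::ublas::compressed_matrix<double> ublas_A = A;   // conversion for use with uBLAS
    }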
821 
827 class amg_point
828 {
829 private:
830  typedef amg_sparsevector<amg_point*> ListType;
831 
832  unsigned int index_;
833  unsigned int influence_;
834  // Determines whether point is undecided.
835  bool undecided_;
836  // Determines whether point is C point (true) or F point (false).
837  bool cpoint_;
838  unsigned int coarse_index_;
839  // Index offset of parallel coarsening. In that case a point acts as if it had an index of index_-offset_ and treats other points as if they had an index of index+offset_
840  unsigned int offset_;
841  // Aggregate the point belongs to.
842  unsigned int aggregate_;
843 
844  // Holds all points influencing this point.
845  ListType influencing_points_;
846  // Holds all points that are influenced by this point.
847  ListType influenced_points_;
848 
849 public:
850  typedef ListType::iterator iterator;
851  typedef ListType::const_iterator const_iterator;
852 
855  amg_point (unsigned int index, unsigned int size): index_(index), influence_(0), undecided_(true), cpoint_(false), coarse_index_(0), offset_(0), aggregate_(0)
856  {
857  influencing_points_ = ListType(size);
858  influenced_points_ = ListType(size);
859  }
860 
861  void set_offset(unsigned int offset) { offset_ = offset; }
862  unsigned int get_offset() { return offset_; }
863  void set_index(unsigned int index) { index_ = index+offset_; }
864  unsigned int get_index() const { return index_-offset_; }
865  unsigned int get_influence() const { return influence_; }
866  void set_aggregate(unsigned int aggregate) { aggregate_ = aggregate; }
867  unsigned int get_aggregate () { return aggregate_; }
868 
869  bool is_cpoint() const { return cpoint_ && !undecided_; }
870  bool is_fpoint() const { return !cpoint_ && !undecided_; }
871  bool is_undecided() const { return undecided_; }
872 
873  // Returns number of influencing points
874  unsigned int number_influencing() const { return influencing_points_.internal_size(); }
875  // Returns true if *point is influencing this point
876  bool is_influencing(amg_point* point) const { return influencing_points_.isnonzero(point->get_index()+offset_); }
877  // Add *point to influencing points
878  void add_influencing_point(amg_point* point) { influencing_points_[point->get_index()+offset_] = point; }
879  // Add *point to influenced points
880  void add_influenced_point(amg_point* point) { influenced_points_[point->get_index()+offset_] = point; }
881 
882  // Clear influencing points
883  void clear_influencing() { influencing_points_.clear(); }
884  // Clear influenced points
885  void clear_influenced() {influenced_points_.clear(); }
886 
887 
888  unsigned int get_coarse_index() const { return coarse_index_; }
889  void set_coarse_index(unsigned int index) { coarse_index_ = index; }
890 
891  // Calculates the initial influence measure equal to the number of influenced points.
892  void calc_influence() { influence_ = influenced_points_.internal_size(); }
893 
894  // Add to influence measure.
895  unsigned int add_influence(unsigned int add)
896  {
897  influence_ += add;
898  return influence_;
899  }
900  // Make this point C point. Only call via amg_pointvector.
901  void make_cpoint()
902  {
903  undecided_ = false;
904  cpoint_ = true;
905  influence_ = 0;
906  }
907  // Make this point F point. Only call via amg_pointvector.
908  void make_fpoint()
909  {
910  undecided_ = false;
911  cpoint_ = false;
912  influence_ = 0;
913  }
914  // Switch point from F to C point. Only call via amg_pointvector.
915  void switch_ftoc() { cpoint_ = true; }
916 
917  // Iterator handling for influencing and influenced points.
918  iterator begin_influencing() { return influencing_points_.begin(); }
919  iterator end_influencing() { return influencing_points_.end(); }
920  const_iterator begin_influencing() const { return influencing_points_.begin(); }
921  const_iterator end_influencing() const { return influencing_points_.end(); }
922  iterator begin_influenced() { return influenced_points_.begin(); }
923  iterator end_influenced() { return influenced_points_.end(); }
924  const_iterator begin_influenced() const { return influenced_points_.begin(); }
925  const_iterator end_influenced() const { return influenced_points_.end(); }
926 };
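Influence bookkeeping sketch (illustration only; the function name is made up): a point stores which points influence it and which points it influences, and its initial influence measure is simply the number of points it influences.

    void point_demo()
    {
      using viennacl::linalg::detail::amg::amg_point;

      amg_point p0(0, 4), p1(1, 4);          // two points of a 4-point grid

      p0.add_influenced_point(&p1);          // p0 influences p1 ...
      p1.add_influencing_point(&p0);         // ... so p1 records p0 in its influencing list

      p0.calc_influence();                   // influence measure of p0 is now 1
      bool strong = p1.is_influencing(&p0);  // true: p0 influences p1
    }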
927 
930 struct classcomp
931 {
932  // Function returns true if l comes before r in the ordering.
933  bool operator() (amg_point* l, amg_point* r) const
934  {
935  // Map is sorted by influence number starting with the highest
936  // If influence number is the same then lowest point index comes first
937  return (l->get_influence() < r->get_influence() || (l->get_influence() == r->get_influence() && l->get_index() > r->get_index()));
938  }
939 };
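Because std::set iterates in ascending order under this comparator, the point with the highest influence measure (ties broken towards the lowest index) always sits at the end of the set. A small sketch (illustration only; the function name is made up):

    void classcomp_demo()
    {
      using namespace viennacl::linalg::detail::amg;

      amg_point a(0, 3), b(1, 3), c(2, 3);
      a.add_influence(2);  b.add_influence(5);  c.add_influence(5);

      std::set<amg_point*, classcomp> sorted;
      sorted.insert(&a);  sorted.insert(&b);  sorted.insert(&c);

      amg_point* next = *(--sorted.end());   // &b: highest influence (5), lower index than c
    }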
940 
946 class amg_pointvector
947 {
948 private:
949  // Type for the sorted list
950  typedef std::set<amg_point*,classcomp> ListType;
951  // Type for the vector of pointers
952  typedef std::vector<amg_point*> VectorType;
953 
954  VectorType pointvector_;
955  ListType pointlist_;
956  unsigned int size_;
957  unsigned int c_points_, f_points_;
958 
959 public:
960  typedef VectorType::iterator iterator;
961  typedef VectorType::const_iterator const_iterator;
962 
966  amg_pointvector(unsigned int size = 0): size_(size)
967  {
968  pointvector_ = VectorType(size);
969  c_points_ = f_points_ = 0;
970  }
971 
972  // Construct all the points dynamically and save pointers into vector.
973  void init_points()
974  {
975  for (unsigned int i=0; i<size(); ++i)
976  pointvector_[i] = new amg_point(i,size());
977  }
978  // Delete all the points.
980  {
981  for (unsigned int i=0; i<size(); ++i)
982  delete pointvector_[i];
983  }
984  // Add point to the vector. Note: User has to make sure that point at point->get_index() does not exist yet, otherwise it will be overwritten!
985  void add_point(amg_point *point)
986  {
987  pointvector_[point->get_index()] = point;
988  if (point->is_cpoint()) c_points_++;
989  else if (point->is_fpoint()) f_points_++;
990  }
991 
992  // Update C and F count for point *point.
993  // Necessary if C and F points were constructed outside this data structure (e.g. by parallel coarsening RS0 or RS3).
994  void update_cf(amg_point *point)
995  {
996  if (point->is_cpoint()) c_points_++;
997  else if (point->is_fpoint()) f_points_++;
998  }
999  // Clear the C and F point count.
1000  void clear_cf() { c_points_ = f_points_ = 0; }
1001 
1002  // Clear both point lists.
1004  {
1005  for (iterator iter = pointvector_.begin(); iter != pointvector_.end(); ++iter)
1006  {
1007  (*iter)->clear_influencing();
1008  (*iter)->clear_influenced();
1009  }
1010  }
1011 
1012  amg_point* operator[](unsigned int i) const { return pointvector_[i]; }
1013  iterator begin() { return pointvector_.begin(); }
1014  iterator end() { return pointvector_.end(); }
1015  const_iterator begin() const { return pointvector_.begin(); }
1016  const_iterator end() const { return pointvector_.end(); }
1017 
1018  void resize(unsigned int size)
1019  {
1020  size_ = size;
1021  pointvector_ = VectorType(size);
1022  }
1023  unsigned int size() const { return size_; }
1024 
1025  // Returns number of C points
1026  unsigned int get_cpoints() const { return c_points_; }
1027  // Returns number of F points
1028  unsigned int get_fpoints() const { return f_points_; }
1029 
1030  // Does the initial sorting of points into the list. Sorting is automatically done by the std::set data type.
1031  void sort()
1032  {
1033  for (iterator iter = begin(); iter != end(); ++iter)
1034  pointlist_.insert(*iter);
1035  }
1036  // Returns the point with the highest influence measure
1038  {
1039  // No points remaining? Return NULL such that coarsening will stop.
1040  if (pointlist_.size() == 0)
1041  return NULL;
1042  // If point with highest influence measure (end of the list) has measure of zero, then no further C points can be constructed. Return NULL.
1043  if ((*(--pointlist_.end()))->get_influence() == 0)
1044  return NULL;
1045  // Otherwise, return the point with highest influence measure located at the end of the list.
1046  else
1047  return *(--pointlist_.end());
1048  }
1049  // Add "add" to influence measure for point *point in the sorted list.
1050  void add_influence(amg_point* point, unsigned int add)
1051  {
1052  ListType::iterator iter = pointlist_.find(point);
1053  // If point is not in the list then stop.
1054  if (iter == pointlist_.end()) return;
1055 
1056  // Point has to be erased first as changing the value does not re-order the std::set
1057  pointlist_.erase(iter);
1058  point->add_influence(add);
1059 
1060  // Insert point back into the list. Using the iterator improves performance. The new position has to be at the same position or to the right of the old.
1061  pointlist_.insert(point);
1062  }
1063  // Make *point a C point and remove it from the sorted list
1064  void make_cpoint(amg_point* point)
1065  {
1066  pointlist_.erase(point);
1067  point->make_cpoint();
1068  c_points_++;
1069  }
1070  // Make *point an F point and remove it from the sorted list
1071  void make_fpoint(amg_point* point)
1072  {
1073  pointlist_.erase(point);
1074  point->make_fpoint();
1075  f_points_++;
1076  }
1077  // Switch *point from F to C point
1078  void switch_ftoc(amg_point* point)
1079  {
1080  point->switch_ftoc();
1081  c_points_++;
1082  f_points_--;
1083  }
1084 
1085  // Build vector of indices for C point on the coarse level.
1087  {
1088  unsigned int count = 0;
1089  // Use simple counter for index creation.
1090  for (iterator iter = pointvector_.begin(); iter != pointvector_.end(); ++iter)
1091  {
1092  // Set index on coarse level using counter variable
1093  if ((*iter)->is_cpoint())
1094  {
1095  (*iter)->set_coarse_index(count);
1096  count++;
1097  }
1098  }
1099  }
1100 
1101  // Return information for debugging purposes
1102  template<typename MatrixT>
1103  void get_influence_matrix(MatrixT & mat) const
1104  {
1105  mat = MatrixT(size(),size());
1106  mat.clear();
1107 
1108  for (const_iterator row_iter = begin(); row_iter != end(); ++row_iter)
1109  for (amg_point::iterator col_iter = (*row_iter)->begin_influencing(); col_iter != (*row_iter)->end_influencing(); ++col_iter)
1110  mat((*row_iter)->get_index(),(*col_iter)->get_index()) = true;
1111  }
1112  template<typename VectorT>
1113  void get_influence(VectorT & vec) const
1114  {
1115  vec = VectorT(size_);
1116  vec.clear();
1117 
1118  for (const_iterator iter = begin(); iter != end(); ++iter)
1119  vec[(*iter)->get_index()] = (*iter)->get_influence();
1120  }
1121  template<typename VectorT>
1122  void get_sorting(VectorT & vec) const
1123  {
1124  vec = VectorT(pointlist_.size());
1125  vec.clear();
1126  unsigned int i=0;
1127 
1128  for (ListType::const_iterator iter = pointlist_.begin(); iter != pointlist_.end(); ++iter)
1129  {
1130  vec[i] = (*iter)->get_index();
1131  i++;
1132  }
1133  }
1134  template<typename VectorT>
1135  void get_C(VectorT & vec) const
1136  {
1137  vec = VectorT(size_);
1138  vec.clear();
1139 
1140  for (const_iterator iter = begin(); iter != end(); ++iter)
1141  {
1142  if ((*iter)->is_cpoint())
1143  vec[(*iter)->get_index()] = true;
1144  }
1145  }
1146  template<typename VectorT>
1147  void get_F(VectorT & vec) const
1148  {
1149  vec = VectorT(size_);
1150  vec.clear();
1151 
1152  for (const_iterator iter = begin(); iter != end(); ++iter)
1153  {
1154  if ((*iter)->is_fpoint())
1155  vec[(*iter)->get_index()] = true;
1156  }
1157  }
1158  template<typename MatrixT>
1159  void get_Aggregates(MatrixT & mat) const
1160  {
1161  mat = MatrixT(size_,size_);
1162  mat.clear();
1163 
1164  for (const_iterator iter = begin(); iter != end(); ++iter)
1165  {
1166  if (!(*iter)->is_undecided())
1167  mat((*iter)->get_aggregate(),(*iter)->get_index()) = true;
1168  }
1169  }
1170 };
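A coarsening-style usage sketch for amg_pointvector (illustration only; the function name is made up): the points are allocated once, sorted by influence, and then promoted to C or F points while the counters are kept up to date.

    void pointvector_demo()
    {
      using namespace viennacl::linalg::detail::amg;

      amg_pointvector points(4);
      points.init_points();                          // allocates one amg_point per index

      points[0]->add_influenced_point(points[1]);    // record a strong coupling 0 -> 1
      points[1]->add_influencing_point(points[0]);
      for (amg_pointvector::iterator it = points.begin(); it != points.end(); ++it)
        (*it)->calc_influence();

      points.sort();                                 // build the influence-ordered set
      points.make_cpoint(points[0]);                 // decide points; C/F counters are updated
      points.make_fpoint(points[1]);
      unsigned int nc = points.get_cpoints();        // 1
      // (the class also provides a member, shown above, that deletes the allocated points again)
    }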
1171 
1175 template<typename InternalT1, typename InternalT2>
1176 class amg_slicing
1177 {
1178  typedef typename InternalT1::value_type SparseMatrixType;
1179  typedef typename InternalT2::value_type PointVectorType;
1180 
1181 public:
1182  // Data structures on a per-processor basis.
1183  boost::numeric::ublas::vector<InternalT1> A_slice_;
1184  boost::numeric::ublas::vector<InternalT2> pointvector_slice_;
1185  // Holds the offsets showing the indices for which a new slice begins.
1186  boost::numeric::ublas::vector<boost::numeric::ublas::vector<unsigned int> > offset_;
1187 
1188  unsigned int threads_;
1189  unsigned int levels_;
1190 
1191  void init(unsigned int levels, unsigned int threads = 0)
1192  {
1193  // Either use the number of threads chosen by the user or the maximum number of threads available on the processor.
1194  if (threads == 0)
1195  #ifdef VIENNACL_WITH_OPENMP
1196  threads_ = omp_get_num_procs();
1197  #else
1198  threads_ = 1;
1199  #endif
1200  else
1201  threads_ = threads;
1202 
1203  levels_ = levels;
1204 
1205  A_slice_.resize(threads_);
1206  pointvector_slice_.resize(threads_);
1207  // Offset has threads_+1 entries to also hold the total size
1208  offset_.resize(threads_+1);
1209 
1210  for (unsigned int i=0; i<threads_; ++i)
1211  {
1212  A_slice_[i].resize(levels_);
1213  pointvector_slice_[i].resize(levels_);
1214  // Offset needs one more level for the build-up of the next offset
1215  offset_[i].resize(levels_+1);
1216  }
1217  offset_[threads_].resize(levels_+1);
1218  } //init()
1219 
1220  // Slice matrix A into as many parts as threads are used.
1221  void slice(unsigned int level, InternalT1 const & A, InternalT2 const & pointvector)
1222  {
1223  // On the finest level, build a new slicing first.
1224  if (level == 0)
1225  slice_new(level, A);
1226 
1227  // On coarser levels use the same slicing as on the finest level (Points stay together on the same thread on all levels).
1228  // This is necessary as due to interpolation and galerkin product there only exist connections between points on the same thread on coarser levels.
1229  // Note: Offset is determined in amg_coarse_rs0() after fine level was built.
1230  slice_build(level, A, pointvector);
1231  }
1232 
1233  // Join point data structure into Pointvector
1234  void join(unsigned int level, InternalT2 & pointvector) const
1235  {
1236  typedef typename InternalT2::value_type PointVectorType;
1237 
1238  // Reset index offset of all points and update overall C and F point count
1239  pointvector[level].clear_cf();
1240  for (typename PointVectorType::iterator iter = pointvector[level].begin(); iter != pointvector[level].end(); ++iter)
1241  {
1242  (*iter)->set_offset(0);
1243  pointvector[level].update_cf(*iter);
1244  }
1245  }
1246 
1247 private:
1252  void slice_new(unsigned int level, InternalT1 const & A)
1253  {
1254  // Determine index offset of all the slices (index of A[level] when the respective slice starts).
1255  #ifdef VIENNACL_WITH_OPENMP
1256  #pragma omp parallel for
1257  #endif
1258  for (long i2=0; i2<=static_cast<long>(threads_); ++i2)
1259  {
1260  std::size_t i = static_cast<std::size_t>(i2);
1261 
1262  // Offset of first piece is zero. Pieces 1,...,threads-1 have equal size while the last one might be greater.
1263  if (i == 0) offset_[i][level] = 0;
1264  else if (i == threads_) offset_[i][level] = static_cast<unsigned int>(A[level].size1());
1265  else offset_[i][level] = static_cast<unsigned int>(i*(A[level].size1()/threads_));
1266  }
1267  }
1268 
1274  void slice_build(unsigned int level, InternalT1 const & A, InternalT2 const & pointvector)
1275  {
1276  typedef typename SparseMatrixType::const_iterator1 ConstRowIterator;
1277  typedef typename SparseMatrixType::const_iterator2 ConstColIterator;
1278 
1279  #ifdef VIENNACL_WITH_OPENMP
1280  #pragma omp parallel for
1281  #endif
1282  for (long i2=0; i2<static_cast<long>(threads_); ++i2)
1283  {
1284  std::size_t i = static_cast<std::size_t>(i2);
1285 
1286  amg_point *point;
1287 
1288  // Allocate space for the matrix slice and the pointvector.
1289  A_slice_[i][level] = SparseMatrixType(offset_[i+1][level]-offset_[i][level], offset_[i+1][level]-offset_[i][level]);
1290  pointvector_slice_[i][level] = PointVectorType(offset_[i+1][level]-offset_[i][level]);
1291 
1292  // Iterate over the part that belongs to thread i (from Offset[i][level] to Offset[i+1][level]).
1293  ConstRowIterator row_iter = A[level].begin1();
1294  row_iter += offset_[i][level];
1295  unsigned int x = static_cast<unsigned int>(row_iter.index1());
1296 
1297  while (x < offset_[i+1][level] && row_iter != A[level].end1())
1298  {
1299  // Set offset for point index and save point for the respective thread
1300  point = pointvector[level][x];
1301  point->set_offset(offset_[i][level]);
1302  pointvector_slice_[i][level].add_point(point);
1303 
1304  ConstColIterator col_iter = row_iter.begin();
1305  unsigned int y = static_cast<unsigned int>(col_iter.index2());
1306 
1307  // Save all coefficients from the matrix slice
1308  while (y < offset_[i+1][level] && col_iter != row_iter.end())
1309  {
1310  if (y >= offset_[i][level])
1311  A_slice_[i][level](x-offset_[i][level], y-offset_[i][level]) = *col_iter;
1312 
1313  ++col_iter;
1314  y = static_cast<unsigned int>(col_iter.index2());
1315  }
1316 
1317  ++row_iter;
1318  x = static_cast<unsigned int>(row_iter.index1());
1319  }
1320  }
1321  }
1322 };
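A setup sketch for the slicing helper (illustration only). The class declaration line was lost in this listing; it is referred to here as amg_slicing, matching the reconstruction above, and the hierarchy typedefs and function name are assumptions that merely mirror the boost::numeric::ublas::vector members the class uses.

    void slicing_demo()
    {
      using namespace viennacl::linalg::detail::amg;
      namespace ublas = boost::numeric::ublas;

      typedef amg_sparsematrix<double>        MatrixType;
      typedef ublas::vector<MatrixType>       MatrixHierarchy;   // one operator per level
      typedef ublas::vector<amg_pointvector>  PointHierarchy;    // one point set per level

      unsigned int n = 8, levels = 1;
      MatrixHierarchy A(levels);
      PointHierarchy  points(levels);
      A(0) = MatrixType(n, n);
      points(0) = amg_pointvector(n);
      points(0).init_points();
      for (unsigned int i = 0; i < n; ++i)
        A(0)(i, i) = 4.0;                  // trivial diagonal operator, just to have entries

      amg_slicing<MatrixHierarchy, PointHierarchy> slicing;
      slicing.init(levels, 2);             // split into two slices (0 would use all OpenMP threads)
      slicing.slice(0, A, points);         // distribute the finest level across the slices
      // ... coarsen each slice independently, then:
      slicing.join(0, points);             // reset offsets and rebuild the global C/F counts
    }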
1323 
1329 template<typename SparseMatrixT>
1330 void amg_mat_prod (SparseMatrixT & A, SparseMatrixT & B, SparseMatrixT & RES)
1331 {
1332  typedef typename SparseMatrixT::value_type ScalarType;
1333  typedef typename SparseMatrixT::iterator1 InternalRowIterator;
1334  typedef typename SparseMatrixT::iterator2 InternalColIterator;
1335 
1336  RES = SparseMatrixT(static_cast<unsigned int>(A.size1()), static_cast<unsigned int>(B.size2()));
1337  RES.clear();
1338 
1339 #ifdef VIENNACL_WITH_OPENMP
1340  #pragma omp parallel for
1341 #endif
1342  for (long x=0; x<static_cast<long>(A.size1()); ++x)
1343  {
1344  InternalRowIterator row_iter = A.begin1();
1345  row_iter += vcl_size_t(x);
1346  for (InternalColIterator col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
1347  {
1348  unsigned int y = static_cast<unsigned int>(col_iter.index2());
1349  InternalRowIterator row_iter2 = B.begin1();
1350  row_iter2 += vcl_size_t(y);
1351 
1352  for (InternalColIterator col_iter2 = row_iter2.begin(); col_iter2 != row_iter2.end(); ++col_iter2)
1353  {
1354  unsigned int z = static_cast<unsigned int>(col_iter2.index2());
1355  ScalarType prod = *col_iter * *col_iter2;
1356  RES.add(static_cast<unsigned int>(x),static_cast<unsigned int>(z),prod);
1357  }
1358  }
1359  }
1360 }
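A small worked example for the sparse product (illustration only; the function name is made up). With the entries below, the (0,0) coefficient of the result accumulates 1*4 + 2*5 = 14.

    void mat_prod_demo()
    {
      using namespace viennacl::linalg::detail::amg;

      amg_sparsematrix<double> A(2, 2), B(2, 2), C;
      A(0,0) = 1.0;  A(0,1) = 2.0;  A(1,1) = 3.0;
      B(0,0) = 4.0;  B(1,0) = 5.0;  B(1,1) = 6.0;

      amg_mat_prod(A, B, C);       // C = A * B, accumulated entry by entry via C.add()
      double c00 = C(0, 0);        // 14
    }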
1361 
1367 template<typename SparseMatrixT>
1368 void amg_galerkin_prod (SparseMatrixT & A, SparseMatrixT & P, SparseMatrixT & RES)
1369 {
1370  typedef typename SparseMatrixT::value_type ScalarType;
1371  typedef typename SparseMatrixT::iterator1 InternalRowIterator;
1372  typedef typename SparseMatrixT::iterator2 InternalColIterator;
1373 
1374  RES = SparseMatrixT(static_cast<unsigned int>(P.size2()), static_cast<unsigned int>(P.size2()));
1375  RES.clear();
1376 
1377 #ifdef VIENNACL_WITH_OPENMP
1378  #pragma omp parallel for
1379 #endif
1380  for (long x=0; x<static_cast<long>(P.size2()); ++x)
1381  {
1382  amg_sparsevector<ScalarType> row(static_cast<unsigned int>(A.size2()));
1383  InternalRowIterator row_iter = P.begin1(true);
1384  row_iter += vcl_size_t(x);
1385 
1386  for (InternalColIterator col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
1387  {
1388  long y1 = static_cast<long>(col_iter.index2());
1389  InternalRowIterator row_iter2 = A.begin1();
1390  row_iter2 += vcl_size_t(y1);
1391 
1392  for (InternalColIterator col_iter2 = row_iter2.begin(); col_iter2 != row_iter2.end(); ++col_iter2)
1393  {
1394  long y2 = static_cast<long>(col_iter2.index2());
1395  row.add (static_cast<unsigned int>(y2), *col_iter * *col_iter2);
1396  }
1397  }
1398  for (typename amg_sparsevector<ScalarType>::iterator iter = row.begin(); iter != row.end(); ++iter)
1399  {
1400  long y2 = iter.index();
1401  InternalRowIterator row_iter3 = P.begin1();
1402  row_iter3 += vcl_size_t(y2);
1403 
1404  for (InternalColIterator col_iter3 = row_iter3.begin(); col_iter3 != row_iter3.end(); ++col_iter3)
1405  {
1406  long z = static_cast<long>(col_iter3.index2());
1407  RES.add (static_cast<unsigned int>(x), static_cast<unsigned int>(z), *col_iter3 * *iter);
1408  }
1409  }
1410  }
1411 
1412  #ifdef VIENNACL_AMG_DEBUG
1413  std::cout << "Galerkin Operator: " << std::endl;
1414  printmatrix (RES);
1415  #endif
1416 }
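In the AMG hierarchy this routine forms the coarse operator A_coarse = trans(P) * A * P without materialising trans(P) explicitly (P.begin1(true) iterates the transposed prolongation). A call sketch (illustration only; the wrapper function is an assumption and the matrices are assumed to come from the setup phase):

    void galerkin_demo(viennacl::linalg::detail::amg::amg_sparsematrix<double> & A,   // fine operator, n x n
                       viennacl::linalg::detail::amg::amg_sparsematrix<double> & P)   // prolongation, n x n_c
    {
      viennacl::linalg::detail::amg::amg_sparsematrix<double> A_coarse;
      amg_galerkin_prod(A, P, A_coarse);   // A_coarse = trans(P) * A * P,  size n_c x n_c
    }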
1417 
1423 template<typename SparseMatrixT>
1424 void test_triplematprod(SparseMatrixT & A, SparseMatrixT & P, SparseMatrixT & A_i1)
1425 {
1426  typedef typename SparseMatrixT::value_type ScalarType;
1427 
1428  boost::numeric::ublas::compressed_matrix<ScalarType> A_temp (A.size1(), A.size2());
1429  A_temp = A;
1430  boost::numeric::ublas::compressed_matrix<ScalarType> P_temp (P.size1(), P.size2());
1431  P_temp = P;
1432  P.set_trans(true);
1433  boost::numeric::ublas::compressed_matrix<ScalarType> R_temp (P.size1(), P.size2());
1434  R_temp = P;
1435  P.set_trans(false);
1436 
1437  boost::numeric::ublas::compressed_matrix<ScalarType> RA (R_temp.size1(),A_temp.size2());
1438  RA = boost::numeric::ublas::prod(R_temp,A_temp);
1439  boost::numeric::ublas::compressed_matrix<ScalarType> RAP (RA.size1(),P_temp.size2());
1440  RAP = boost::numeric::ublas::prod(RA,P_temp);
1441 
1442  for (unsigned int x=0; x<RAP.size1(); ++x)
1443  {
1444  for (unsigned int y=0; y<RAP.size2(); ++y)
1445  {
1446  if (std::fabs(static_cast<ScalarType>(RAP(x,y)) - static_cast<ScalarType>(A_i1(x,y))) > 0.0001)
1447  std::cout << x << " " << y << " " << RAP(x,y) << " " << A_i1(x,y) << std::endl;
1448  }
1449  }
1450 }
1451 
1457 template<typename SparseMatrixT, typename PointVectorT>
1458 void test_interpolation(SparseMatrixT & A, SparseMatrixT & P, PointVectorT & Pointvector)
1459 {
1460  for (unsigned int i=0; i<P.size1(); ++i)
1461  {
1462  if (Pointvector.is_cpoint(i))
1463  {
1464  bool set = false;
1465  for (unsigned int j=0; j<P.size2(); ++j)
1466  {
1467  if (P.isnonzero(i,j))
1468  {
1469  if (P(i,j) != 1)
1470  std::cout << "Error 1 in row " << i << std::endl;
1471  if (P(i,j) == 1 && set)
1472  std::cout << "Error 2 in row " << i << std::endl;
1473  if (P(i,j) == 1 && !set)
1474  set = true;
1475  }
1476  }
1477  }
1478 
1479  if (Pointvector.is_fpoint(i))
1480  for (unsigned int j=0; j<P.size2(); ++j)
1481  {
1482  if (P.isnonzero(i,j) && j> Pointvector.get_cpoints()-1)
1483  std::cout << "Error 3 in row " << i << std::endl;
1484  if (P.isnonzero(i,j))
1485  {
1486  bool set = false;
1487  for (unsigned int k=0; k<P.size1(); ++k)
1488  {
1489  if (P.isnonzero(k,j))
1490  {
1491  if (Pointvector.is_cpoint(k) && P(k,j) == 1 && A.isnonzero(i,k))
1492  set = true;
1493  }
1494  }
1495  if (!set)
1496  std::cout << "Error 4 in row " << i << std::endl;
1497  }
1498  }
1499  }
1500 }
1501 
1502 
1503 } //namespace amg
1504 }
1505 }
1506 }
1507 
1508 #endif