This file is indexed.

/usr/include/deal.II/lac/trilinos_vector.h is in libdeal.ii-dev 8.1.0-4.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

// ---------------------------------------------------------------------
// $Id: trilinos_vector.h 30040 2013-07-18 17:06:48Z maier $
//
// Copyright (C) 2008 - 2013 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------

#ifndef __deal2__trilinos_vector_h
#define __deal2__trilinos_vector_h


#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/base/std_cxx1x/shared_ptr.h>
#  include <deal.II/base/subscriptor.h>
#  include <deal.II/base/index_set.h>
#  include <deal.II/base/utilities.h>
#  include <deal.II/lac/exceptions.h>
#  include <deal.II/lac/vector.h>
#  include <deal.II/lac/trilinos_vector_base.h>

#  include "Epetra_Map.h"
#  include "Epetra_LocalMap.h"

DEAL_II_NAMESPACE_OPEN


// forward declaration
template <typename> class Vector;

/**
 * @addtogroup TrilinosWrappers
 *@{
 */
namespace TrilinosWrappers
{
  class SparseMatrix;

  namespace
  {
#ifndef DEAL_II_USE_LARGE_INDEX_TYPE
    // define a helper function that queries the global ID of a local ID of
    // an Epetra_BlockMap object by calling either the 32- or 64-bit
    // function, as necessary.
    int gid(const Epetra_BlockMap &map, int i)
    {
      return map.GID(i);
    }
#else
    // define a helper function that queries the global ID of a local ID of
    // an Epetra_BlockMap object by calling either the 32- or 64-bit
    // function, as necessary.
    long long int gid(const Epetra_BlockMap &map, int i)
    {
      return map.GID64(i);
    }
#endif
  }

  /**
   * Namespace for Trilinos vector classes that work in parallel over
   * MPI. This namespace is restricted to vectors only, whereas matrices
   * are always MPI based when run on more than one processor.
   *
   * @ingroup TrilinosWrappers
   * @author Martin Kronbichler, Wolfgang Bangerth, 2008
   */
  namespace MPI
  {
    class BlockVector;

    /**
     * This class implements a wrapper to use the Trilinos distributed
     * vector class Epetra_FEVector. This class is derived from the
     * TrilinosWrappers::VectorBase class and provides all functionality
     * included there.
     *
     * Note that Trilinos only guarantees that operations do what you expect
     * if the function @p GlobalAssemble has been called after vector
     * assembly in order to distribute the data. This is necessary since
     * some processes might have accumulated data of elements that are not
     * owned by themselves, but must be sent to the owning process. In order
     * to avoid using the wrong data, you need to call Vector::compress()
     * before you actually use the vectors.
     *
     * <h3>Parallel communication model</h3>
     *
     * The parallel functionality of Trilinos is built on top of the Message
     * Passing Interface (MPI). MPI's communication model is built on
     * collective communications: if one process wants something from
     * another, that other process has to be willing to accept this
     * communication. A process cannot query data from another process by
     * calling a remote function, without that other process expecting such
     * a transaction. The consequence is that most of the operations in the
     * base class of this class have to be called collectively. For example,
     * if you want to compute the l2 norm of a parallel vector, @em all
     * processes across which this vector is shared have to call the @p
     * l2_norm function. If you don't do this, but instead only call the @p
     * l2_norm function on one process, then the following happens: This one
     * process will call one of the collective MPI functions and wait for
     * all the other processes to join in on this. Since the other processes
     * don't call this function, you will either get a time-out on the first
     * process, or, worse, by the time the next call to a Trilinos function
     * generates an MPI message on the other processes, you will get a
     * cryptic message that only a subset of processes attempted a
     * communication. These bugs can be very hard to figure out, unless you
     * are well-acquainted with the communication model of MPI, and know
     * which functions may generate MPI messages.
     *
     * One particular case where an MPI message may be generated
     * unexpectedly is discussed below.
     *
     * <h3>Accessing individual elements of a vector</h3>
     *
     * Trilinos does allow read access to individual elements of a vector,
     * but in the distributed case only to elements that are stored
     * locally. We implement this through calls like
     * <tt>d=vec(i)</tt>. However, if you access an element outside the
     * locally stored range, an exception is generated.
     *
     * In contrast to read access, Trilinos (and the respective deal.II
     * wrapper classes) allow writing (or adding) to individual elements of
     * vectors, even if they are stored on a different process. You can do
     * this by writing, for example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>,
     * or similar operations. There is one catch, however, that may lead to
     * very confusing error messages: Trilinos requires application programs
     * to call the compress() function when they switch from adding to
     * elements to writing to elements. The reasoning is that all processes
     * might accumulate addition operations to elements, even if multiple
     * processes write to the same elements. By the time we call compress()
     * the next time, all these additions are executed. However, if one
     * process adds to an element, and another overwrites it, the order
     * of execution would yield non-deterministic behavior if we don't make
     * sure that a synchronization with compress() happens in between.
     *
     * In order to make sure these calls to compress() happen at the
     * appropriate time, the deal.II wrappers keep a state variable that
     * stores the presently allowed operation: additions or
     * writes. If the library encounters an operation of the opposite
     * kind, it calls compress() and flips the state. This can sometimes
     * lead to very confusing behavior, in code that may for example look
     * like this:
     *
     * @code
     * TrilinosWrappers::Vector vector;
     * // do some write operations on the vector
     * for (size_type i=0; i<vector.size(); ++i)
     *   vector(i) = i;
     *
     * // do some additions to vector elements, but
     * // only for some elements
     * for (size_type i=0; i<vector.size(); ++i)
     *   if (some_condition(i) == true)
     *     vector(i) += 1;
     *
     * // do another collective operation
     * const double norm = vector.l2_norm();
     * @endcode
     *
     * This code can run into trouble: by the time we see the first addition
     * operation, we need to flush the overwrite buffers for the vector, and
     * the deal.II library will do so by calling compress(). However, it
     * will only do so for all processes that actually do an addition -- if
     * the condition is never true for one of the processes, then this one
     * will not get to the actual compress() call, whereas all the other
     * ones do. This gets us into trouble, since all the other processes
     * hang in the call to flush the write buffers, while the one remaining
     * process advances to the call to compute the l2 norm. At this time,
     * you will get an error that some operation was attempted by only a
     * subset of processes. This may seem surprising, unless you
     * know that write/addition operations on single elements may trigger
     * such communication.
     *
     * The problem described here may be avoided by placing additional calls
     * to compress(), or making sure that all processes do the same type of
     * operations at the same time, for example by placing zero additions if
     * necessary.
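     *
     * For example, the loop above can be made safe by having every
     * process execute the same sequence of compress() calls (a sketch,
     * reusing the hypothetical @p some_condition predicate from above):
     *
     * @code
     * for (size_type i=0; i<vector.size(); ++i)
     *   vector(i) = i;
     * vector.compress (VectorOperation::insert);
     *
     * for (size_type i=0; i<vector.size(); ++i)
     *   if (some_condition(i) == true)
     *     vector(i) += 1;
     * // every process reaches this call, whether or not it added anything
     * vector.compress (VectorOperation::add);
     *
     * const double norm = vector.l2_norm();
     * @endcode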
     *
     * @ingroup TrilinosWrappers
     * @ingroup Vectors
     * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
     */
    class Vector : public VectorBase
    {
    public:
      /**
       * Declare type for container size.
       */
      typedef dealii::types::global_dof_index size_type;

      /**
       * A variable that indicates whether this vector
       * supports distributed data storage. If true, then
       * this vector also needs an appropriate compress()
       * function that allows recent set or add operations
       * to individual elements to be communicated
       * to other processors.
       *
       * For the current class, the variable equals
       * true, since it does support parallel data storage.
       */
      static const bool supports_distributed_data = true;

      /**
       * @name Basic constructors and initialization.
       */
      //@{
      /**
       * Default constructor that
       * generates an empty (zero size)
       * vector. The function
       * <tt>reinit()</tt> will have to
       * give the vector the correct
       * size and distribution among
       * processes in case of an MPI
       * run.
       */
      Vector ();

      /**
       * Copy constructor using the
       * given vector.
       */
      Vector (const Vector &V);

      /**
       * Destructor.
       */
      ~Vector ();

      /**
       * Reinit functionality. This
       * function sets the calling vector
       * to the dimension and the parallel
       * distribution of the input vector,
       * but does not copy the elements in
       * <tt>v</tt>. If <tt>fast</tt> is
       * not <tt>true</tt>, the elements in
       * the vector are initialized with
       * zero, otherwise the content will
       * be left unchanged and the user has
       * to set all elements.
       *
       * This function has a third argument,
       * <tt>allow_different_maps</tt>,
       * that allows for an exchange of
       * data between two equal-sized
       * vectors (that are, however, distributed
       * differently among the
       * processors). A trivial application
       * of this function is to generate a
       * replication of a whole vector on
       * each machine, when the calling
       * vector is built according to the
       * localized vector class
       * TrilinosWrappers::Vector, and
       * <tt>v</tt> is a distributed
       * vector. In this case, the variable
       * <tt>fast</tt> needs to be set to
       * <tt>false</tt>, since it does not
       * make sense to exchange data
       * between differently parallelized
       * vectors without touching the
       * elements.
       */
      void reinit (const VectorBase &v,
                   const bool        fast = false,
                   const bool        allow_different_maps = false);

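      /**
       * Reinit functionality. Sets the calling vector to the combined
       * dimension and parallel distribution of the given block vector
       * <tt>v</tt>, copying the element values as well if
       * <tt>import_data</tt> is <tt>true</tt>.
       */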
      void reinit (const BlockVector &v,
                   const bool         import_data = false);

      /**
       * Reinit function. Creates a vector
       * using the IndexSet @p local for the
       * locally owned unknowns, optionally
       * adding the ghost values given by @p ghost.
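       *
       * For example (a sketch, assuming @p v is a
       * TrilinosWrappers::MPI::Vector; the index ranges are made up
       * for illustration):
       * @code
       * IndexSet local (100);
       * local.add_range (0, 50);    // indices owned by this process
       * IndexSet ghost (100);
       * ghost.add_range (50, 60);   // additionally needed ghost indices
       * v.reinit (local, ghost, MPI_COMM_WORLD);
       * @endcode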
       */
      void reinit (const IndexSet &local,
                   const IndexSet &ghost,
                   const MPI_Comm &communicator = MPI_COMM_WORLD);

      /**
       * Set all components of the
       * vector to the given number @p
       * s. Simply pass this down to
       * the base class, but we still
       * need to declare this function
       * to make the example given in
       * the discussion about making
       * the constructor explicit work.
       */
      Vector &operator = (const TrilinosScalar s);

      /**
       * Copy the given vector. Resize
       * the present vector if
       * necessary. In this case, also
       * the Epetra_Map that describes
       * the parallel partitioning is
       * taken from the input vector.
       */
      Vector &
      operator = (const Vector &V);

      /**
       * Copy operator from a given
       * localized vector (present on
       * all processes) in
       * TrilinosWrappers format to the
       * current distributed
       * vector. This function assumes
       * that the calling vector (left
       * hand object) already is of the
       * same size as the right hand
       * side vector. Otherwise, an
       * exception will be thrown.
       */
      Vector &
      operator = (const ::dealii::TrilinosWrappers::Vector &V);

      /**
       * Another copy function. This
       * one takes a deal.II vector and
       * copies it into a
       * TrilinosWrappers vector. Note
       * that since we do not provide
       * any Epetra_Map that tells
       * about the partitioning of the
       * vector among the MPI
       * processes, the size of the
       * TrilinosWrappers vector has to
       * be the same as the size of the
       * input vector. In order to
       * change the map, use the
       * reinit(const Epetra_Map
       * &input_map) function.
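       *
       * For example (a sketch, assuming @p v is a distributed vector
       * that has already been initialized with the correct size):
       * @code
       * dealii::Vector<double> w (v.size());
       * w = 1.;   // fill the serial vector
       * v = w;    // copy its elements into the Trilinos vector
       * @endcode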
       */
      template <typename Number>
      Vector &
      operator = (const ::dealii::Vector<Number> &v);

      /**
       * This reinit function is
       * meant to be used for
       * parallel calculations where
       * some non-local data has to
       * be used. The typical
       * situation where one needs
       * this function is the call of
       * the
       * FEValues<dim>::get_function_values
       * function (or of some
       * derivatives) in
       * parallel. Since it is
       * usually faster to retrieve
       * the data in advance, this
       * function can be called
       * before the assembly forks
       * out to the different
       * processors. What this
       * function does is the
       * following: It takes the
       * information in the columns
       * of the given matrix and
       * looks which data couples
       * between the different
       * processors. That data is
       * then queried from the input
       * vector. Note that you should
       * not write to the resulting
       * vector any more, since
       * some data can be stored
       * several times on different
       * processors, leading to
       * unpredictable results. In
       * particular, such a vector
       * cannot be used for
       * matrix-vector products as
       * for example done during the
       * solution of linear systems.
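       *
       * A sketch of the intended use (assuming @p matrix is an already
       * assembled TrilinosWrappers::SparseMatrix and @p distributed a
       * matching distributed vector):
       * @code
       * TrilinosWrappers::MPI::Vector ghosted;
       * ghosted.import_nonlocal_data_for_fe (matrix, distributed);
       * // read from ghosted, but do not write to it
       * @endcode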
       */
      void import_nonlocal_data_for_fe
      (const dealii::TrilinosWrappers::SparseMatrix &matrix,
       const Vector                                 &vector);
//@}
      /**
       * @name Initialization with an Epetra_Map
       */
//@{
      /**
       * This constructor takes an
       * Epetra_Map that already knows
       * how to distribute the
       * individual components among
       * the MPI processors. Since it
       * also includes information
       * about the size of the vector,
       * this is all we need to
       * generate a parallel vector.
       */
      explicit Vector (const Epetra_Map &parallel_partitioning);

      /**
       * Copy constructor from the
       * TrilinosWrappers vector
       * class. Since a vector of this
       * class does not necessarily
       * need to be distributed among
       * processes, the user needs to
       * supply us with an Epetra_Map
       * that sets the partitioning
       * details.
       */
      Vector (const Epetra_Map &parallel_partitioning,
              const VectorBase &v);

      /**
       * Reinitialize from a deal.II
       * vector. The Epetra_Map specifies the
       * %parallel partitioning.
       */
      template <typename number>
      void reinit (const Epetra_Map             &parallel_partitioner,
                   const dealii::Vector<number> &v);

      /**
       * Reinit functionality. This
       * function destroys the old
       * vector content and generates a
       * new one based on the input
       * map.
       */
      void reinit (const Epetra_Map &parallel_partitioning,
                   const bool        fast = false);

      /**
       * Copy-constructor from deal.II
       * vectors. Sets the dimension to that
       * of the given vector, and copies all
       * elements.
       */
      template <typename Number>
      Vector (const Epetra_Map             &parallel_partitioning,
              const dealii::Vector<Number> &v);
//@}
      /**
       * @name Initialization with an IndexSet
       */
//@{
      /**
       * This constructor takes an IndexSet
       * that defines how to distribute the
       * individual components among the
       * MPI processors. Since it also
       * includes information about the
       * size of the vector, this is all we
       * need to generate a %parallel
       * vector.
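       *
       * For example (a sketch that distributes 1000 elements in
       * contiguous chunks across all processes):
       * @code
       * const unsigned int n_procs =
       *   Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
       * const unsigned int my_rank =
       *   Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
       * IndexSet locally_owned (1000);
       * locally_owned.add_range (my_rank*1000/n_procs,
       *                          (my_rank+1)*1000/n_procs);
       * TrilinosWrappers::MPI::Vector vec (locally_owned, MPI_COMM_WORLD);
       * @endcode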
       */
      explicit Vector (const IndexSet &parallel_partitioning,
                       const MPI_Comm &communicator = MPI_COMM_WORLD);

      /**
       * Creates a ghosted parallel vector from an IndexSet of locally
       * owned elements and an IndexSet of ghost elements.
       */
      Vector (const IndexSet &local,
              const IndexSet &ghost,
              const MPI_Comm &communicator = MPI_COMM_WORLD);

      /**
       * Copy constructor from the
       * TrilinosWrappers vector
       * class. Since a vector of this
       * class does not necessarily need to
       * be distributed among processes,
       * the user needs to supply us with
       * an IndexSet and an MPI
       * communicator that set the
       * partitioning details.
       */
      Vector (const IndexSet   &parallel_partitioning,
              const VectorBase &v,
              const MPI_Comm   &communicator = MPI_COMM_WORLD);

      /**
       * Copy-constructor from deal.II
       * vectors. Sets the dimension to
       * that of the given vector, and
       * copies all the elements.
       */
      template <typename Number>
      Vector (const IndexSet               &parallel_partitioning,
              const dealii::Vector<Number> &v,
              const MPI_Comm               &communicator = MPI_COMM_WORLD);

      /**
       * Reinit functionality. This function
       * destroys the old vector content and
       * generates a new one based on the
       * input partitioning.  The flag
       * <tt>fast</tt> determines whether the
       * vector should be filled with zero
       * (false) or left untouched (true).
       */
      void reinit (const IndexSet &parallel_partitioning,
                   const MPI_Comm &communicator = MPI_COMM_WORLD,
                   const bool      fast = false);
//@}
    };




// ------------------- inline and template functions --------------


    /**
     * Global function @p swap which overloads the default implementation
     * of the C++ standard library, which uses a temporary object. The
     * function simply exchanges the data of the two vectors.
     *
     * @relates TrilinosWrappers::MPI::Vector
     * @author Martin Kronbichler, Wolfgang Bangerth, 2008
     */
    inline
    void swap (Vector &u, Vector &v)
    {
      u.swap (v);
    }


#ifndef DOXYGEN

    template <typename number>
    Vector::Vector (const Epetra_Map             &input_map,
                    const dealii::Vector<number> &v)
    {
      reinit (input_map, v);
    }



    template <typename number>
    Vector::Vector (const IndexSet               &parallel_partitioner,
                    const dealii::Vector<number> &v,
                    const MPI_Comm               &communicator)
    {
      *this = Vector(parallel_partitioner.make_trilinos_map (communicator, true),
                     v);
    }




    template <typename number>
    void Vector::reinit (const Epetra_Map             &parallel_partitioner,
                         const dealii::Vector<number> &v)
    {
      if (vector.get() == 0 || vector->Map().SameAs(parallel_partitioner) == false)
        vector.reset (new Epetra_FEVector(parallel_partitioner));

      has_ghosts = vector->Map().UniqueGIDs()==false;

      const int size = parallel_partitioner.NumMyElements();

      // Need to copy out values, since
      // deal.II might not use doubles, so
      // direct access is not possible.
      for (int i=0; i<size; ++i)
        (*vector)[0][i] = v(gid(parallel_partitioner,i));
    }


    inline
    Vector &
    Vector::operator = (const TrilinosScalar s)
    {
      VectorBase::operator = (s);

      return *this;
    }


    template <typename Number>
    Vector &
    Vector::operator = (const ::dealii::Vector<Number> &v)
    {
      if (size() != v.size())
        {
          vector.reset (new Epetra_FEVector(Epetra_Map
                                            (static_cast<TrilinosWrappers::types::int_type>(v.size()), 0,
#ifdef DEAL_II_WITH_MPI
                                             Epetra_MpiComm(MPI_COMM_SELF)
#else
                                             Epetra_SerialComm()
#endif
                                            )));
        }

      reinit (vector_partitioner(), v);
      return *this;
    }


#endif

  } /* end of namespace MPI */



  /**
   * This class is a specialization of a Trilinos vector to a localized
   * version. The purpose of this class is to provide a copy interface
   * from the possibly parallel Vector class to a local vector on each
   * processor, in order to be able to access all elements in the vector
   * or to apply certain deal.II functions.
   *
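   * A typical use is to localize a distributed vector (a sketch,
   * assuming @p distributed is an already initialized
   * TrilinosWrappers::MPI::Vector):
   * @code
   * TrilinosWrappers::Vector localized (distributed);
   * // every element is now accessible on every process
   * const TrilinosScalar first = localized(0);
   * @endcode
   *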
   * @ingroup TrilinosWrappers
   * @ingroup Vectors
   * @author Martin Kronbichler, 2008
   */
  class Vector : public VectorBase
  {
  public:
    /**
     * Declare type for container size.
     */
    typedef dealii::types::global_dof_index size_type;

    /**
     * A variable that indicates whether this vector
     * supports distributed data storage. If true, then
     * this vector also needs an appropriate compress()
     * function that allows recent set or add operations
     * to individual elements to be communicated
     * to other processors.
     *
     * For the current class, the variable equals
     * false, since it does not support parallel data storage.
     * If you do need parallel data storage, use
     * TrilinosWrappers::MPI::Vector.
     */
    static const bool supports_distributed_data = false;

    /**
     * Default constructor that
     * generates an empty (zero size)
     * vector. The function
     * <tt>reinit()</tt> will have to
     * give the vector the correct
     * size.
     */
    Vector ();

    /**
     * This constructor takes as
     * input the number of elements
     * in the vector.
     */
    explicit Vector (const size_type n);

    /**
     * This constructor takes an
     * Epetra_Map as input, which
     * determines the number of elements
     * in the vector. If the map is
     * not localized, i.e., if there
     * are some elements that are not
     * present on all processes, only
     * the global size of the map
     * will be taken and a localized
     * map will be generated
     * internally.
     */
    explicit Vector (const Epetra_Map &partitioning);

    /**
     * This constructor takes an
     * IndexSet as input, which determines
     * the number of elements in the
     * vector. If the index set is not
     * localized, i.e., if there are some
     * elements that are not present on
     * all processes, only the global
     * size of the index set will be
     * taken and a localized version will
     * be generated internally.
     */
    explicit Vector (const IndexSet &partitioning,
                     const MPI_Comm &communicator = MPI_COMM_WORLD);

    /**
     * This constructor takes a
     * (possibly parallel) Trilinos
     * Vector and generates a
     * localized version of the whole
     * content on each processor.
     */
    explicit Vector (const VectorBase &V);

    /**
     * Copy-constructor from deal.II
     * vectors. Sets the dimension to that
     * of the given vector, and copies all
     * elements.
     */
    template <typename Number>
    explicit Vector (const dealii::Vector<Number> &v);

    /**
     * Reinit function that resizes
     * the vector to the size
     * specified by <tt>n</tt>.
     */
    void reinit (const size_type n,
                 const bool      fast = false);

    /**
     * Initialization with an
     * Epetra_Map. Similar to the call in
     * the other class MPI::Vector, with
     * the difference that now a copy on
     * all processes is generated. This
     * initialization function is
     * appropriate when the data in the
     * localized vector should be
     * imported from a distributed vector
     * that has been initialized with the
     * same communicator. The variable
     * <tt>fast</tt> determines whether
     * the vector should be filled with
     * zero or left untouched.
     */
    void reinit (const Epetra_Map &input_map,
                 const bool        fast = false);

    /**
     * Initialization with an
     * IndexSet. Similar to the call in the
     * other class MPI::Vector, with the
     * difference that now a copy on all
     * processes is generated. This
     * initialization function is
     * appropriate in case the data in the
     * localized vector should be imported
     * from a distributed vector that has
     * been initialized with the same
     * communicator. The variable
     * <tt>fast</tt> determines whether the
     * vector should be filled with zero
     * (false) or left untouched (true).
     */
    void reinit (const IndexSet   &input_map,
                 const MPI_Comm   &communicator = MPI_COMM_WORLD,
                 const bool        fast = false);

    /**
     * Reinit function. Takes the
     * information of a Vector and copies
     * everything to the calling vector,
     * now also allowing different maps.
     */
    void reinit (const VectorBase &V,
                 const bool        fast = false,
                 const bool        allow_different_maps = false);

    /**
     * Set all components of the
     * vector to the given number @p
     * s. Simply pass this down to
     * the base class, but we still
     * need to declare this function
     * to make the example given in
     * the discussion about making
     * the constructor explicit work.
     */
    Vector &operator = (const TrilinosScalar s);

    /**
     * Sets the left hand argument to
     * the (parallel) Trilinos
     * Vector. Equivalent to the @p
     * reinit function.
     */
    Vector &
    operator = (const MPI::Vector &V);

    /**
     * Sets the left hand argument to
     * the deal.II vector.
     */
    template <typename Number>
    Vector &
    operator = (const ::dealii::Vector<Number> &V);

    /**
     * Copy operator. Copies both the
     * dimension and the content in
     * the right hand argument.
     */
    Vector &
    operator = (const Vector &V);

    /**
     * This function does nothing but is
     * there for compatibility with the
     * @p PETScWrappers::Vector class.
     *
     * For the PETSc vector wrapper class,
     * this function updates the ghost
     * values of the PETSc vector. This
     * is necessary after any modification
     * before reading ghost values.
     *
     * However, for the implementation of
     * this class, it is immaterial and thus
     * an empty function.
     */
    void update_ghost_values () const;
  };



// ------------------- inline and template functions --------------


  /**
   * Global function @p swap which overloads the default implementation
   * of the C++ standard library, which uses a temporary object. The
   * function simply exchanges the data of the two vectors.
   *
   * @relates TrilinosWrappers::Vector
   * @author Martin Kronbichler, Wolfgang Bangerth, 2008
   */
  inline
  void swap (Vector &u, Vector &v)
  {
    u.swap (v);
  }


#ifndef DOXYGEN

  template <typename number>
  Vector::Vector (const dealii::Vector<number> &v)
  {
    Epetra_LocalMap map ((TrilinosWrappers::types::int_type)v.size(), 0, Utilities::Trilinos::comm_self());
    vector.reset (new Epetra_FEVector(map));
    *this = v;
  }



  inline
  Vector &
  Vector::operator = (const TrilinosScalar s)
  {
    VectorBase::operator = (s);

    return *this;
  }



  template <typename Number>
  Vector &
  Vector::operator = (const ::dealii::Vector<Number> &v)
  {
    if (size() != v.size())
      {
        vector.reset();

        Epetra_LocalMap map ((TrilinosWrappers::types::int_type)v.size(), 0,
                             Utilities::Trilinos::comm_self());
        vector.reset (new Epetra_FEVector(map));
      }

    const Epetra_Map &map = vector_partitioner();
    const TrilinosWrappers::types::int_type size = map.NumMyElements();

    Assert (map.MaxLID() == size-1,
            ExcDimensionMismatch(map.MaxLID(), size-1));

    // Need to copy out values, since
    // deal.II might not use doubles, so
    // direct access is not possible.
    for (TrilinosWrappers::types::int_type i=0; i<size; ++i)
      (*vector)[0][i] = v(i);

    return *this;
  }



  inline
  void
  Vector::update_ghost_values () const
  {}


#endif


}


/*@}*/

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_TRILINOS

/*----------------------------   trilinos_vector.h     ---------------------------*/

#endif
/*----------------------------   trilinos_vector.h     ---------------------------*/