Commit 2e9e1bab authored by Alex McCaskey

updating Eigen unsupported to latest from dev



Signed-off-by: Alex McCaskey <mccaskeyaj@ornl.gov>
parent 8ae5ae9e
Pipeline #39760 passed with stages in 10 minutes and 42 seconds
add_subdirectory(Eigen)
add_subdirectory(doc EXCLUDE_FROM_ALL)
-if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
-  add_subdirectory(test) # can't do EXCLUDE_FROM_ALL here, breaks CTest
-else()
-  add_subdirectory(test EXCLUDE_FROM_ALL)
+if(BUILD_TESTING)
+  if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
+    add_subdirectory(test) # can't do EXCLUDE_FROM_ALL here, breaks CTest
+  else()
+    add_subdirectory(test EXCLUDE_FROM_ALL)
+  endif()
+endif()
@@ -40,7 +40,7 @@
# undef realloc
#endif
-#include <Eigen/Core>
+#include "../../Eigen/Core"
namespace Eigen {
@@ -10,7 +10,9 @@
#ifndef EIGEN_ALIGNED_VECTOR3
#define EIGEN_ALIGNED_VECTOR3
-#include <Eigen/Geometry>
+#include "../../Eigen/Geometry"
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
namespace Eigen {
@@ -221,4 +223,6 @@ struct evaluator<AlignedVector3<Scalar> >
}
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_ALIGNED_VECTOR3
@@ -9,9 +9,7 @@
#ifndef EIGEN_ARPACKSUPPORT_MODULE_H
#define EIGEN_ARPACKSUPPORT_MODULE_H
-#include <Eigen/Core>
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "../../Eigen/Core"
/** \defgroup ArpackSupport_Module Arpack support module
*
@@ -22,10 +20,12 @@
* \endcode
*/
-#include <Eigen/SparseCholesky>
+#include "../../Eigen/SparseCholesky"
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h"
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_ARPACKSUPPORT_MODULE_H
/* vim: set filetype=cpp et sw=2 ts=2 ai: */
@@ -28,11 +28,17 @@ namespace Eigen {
//@{
}
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "src/AutoDiff/AutoDiffScalar.h"
// #include "src/AutoDiff/AutoDiffVector.h"
#include "src/AutoDiff/AutoDiffJacobian.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
namespace Eigen {
//@}
}
@@ -10,9 +10,9 @@
#ifndef EIGEN_BVH_MODULE_H
#define EIGEN_BVH_MODULE_H
-#include <Eigen/Core>
-#include <Eigen/Geometry>
-#include <Eigen/StdVector>
+#include "../../Eigen/Core"
+#include "../../Eigen/Geometry"
+#include "../../Eigen/StdVector"
#include <algorithm>
#include <queue>
@@ -19,16 +19,16 @@
#undef isnan
#undef isinf
#undef isfinite
-#include <SYCL/sycl.hpp>
+#include <CL/sycl.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <utility>
#endif
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
#include "../SpecialFunctions"
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "src/util/CXX11Meta.h"
#include "src/util/MaxSizeVector.h"
@@ -40,6 +40,8 @@
* \code
* #include <Eigen/CXX11/Tensor>
* \endcode
+ *
+ * Much of the documentation can be found \ref eigen_tensors "here".
*/
#include <cmath>
@@ -80,12 +82,16 @@ typedef unsigned __int64 uint64_t;
#endif
#ifdef EIGEN_USE_GPU
-#include <iostream>
-#include <cuda_runtime.h>
-#if __cplusplus >= 201103L
-#include <atomic>
-#include <unistd.h>
-#endif
+#include <iostream>
+#if defined(EIGEN_USE_HIP)
+#include <hip/hip_runtime.h>
+#else
+#include <cuda_runtime.h>
+#endif
+#if __cplusplus >= 201103L
+#include <atomic>
+#include <unistd.h>
+#endif
#endif
#include "src/Tensor/TensorMacros.h"
@@ -95,7 +101,10 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorCostModel.h"
#include "src/Tensor/TensorDeviceDefault.h"
#include "src/Tensor/TensorDeviceThreadPool.h"
-#include "src/Tensor/TensorDeviceCuda.h"
+#include "src/Tensor/TensorDeviceGpu.h"
+#ifndef gpu_assert
+#define gpu_assert(x)
+#endif
#include "src/Tensor/TensorDeviceSycl.h"
#include "src/Tensor/TensorIndexList.h"
#include "src/Tensor/TensorDimensionList.h"
@@ -108,18 +117,19 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorGlobalFunctions.h"
#include "src/Tensor/TensorBase.h"
+#include "src/Tensor/TensorBlock.h"
#include "src/Tensor/TensorEvaluator.h"
#include "src/Tensor/TensorExpr.h"
#include "src/Tensor/TensorReduction.h"
-#include "src/Tensor/TensorReductionCuda.h"
+#include "src/Tensor/TensorReductionGpu.h"
#include "src/Tensor/TensorArgMax.h"
#include "src/Tensor/TensorConcatenation.h"
#include "src/Tensor/TensorContractionMapper.h"
#include "src/Tensor/TensorContractionBlocking.h"
#include "src/Tensor/TensorContraction.h"
#include "src/Tensor/TensorContractionThreadPool.h"
-#include "src/Tensor/TensorContractionCuda.h"
+#include "src/Tensor/TensorContractionGpu.h"
#include "src/Tensor/TensorConversion.h"
#include "src/Tensor/TensorConvolution.h"
#include "src/Tensor/TensorFFT.h"
@@ -141,6 +151,7 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorGenerator.h"
#include "src/Tensor/TensorAssign.h"
#include "src/Tensor/TensorScan.h"
+#include "src/Tensor/TensorTrace.h"
#include "src/Tensor/TensorSycl.h"
#include "src/Tensor/TensorExecutor.h"
@@ -154,6 +165,6 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorIO.h"
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
//#endif // EIGEN_CXX11_TENSOR_MODULE
@@ -10,9 +10,9 @@
#ifndef EIGEN_CXX11_TENSORSYMMETRY_MODULE
#define EIGEN_CXX11_TENSORSYMMETRY_MODULE
-#include <unsupported/Eigen/CXX11/Tensor>
+#include "Tensor"
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "src/util/CXX11Meta.h"
@@ -33,7 +33,7 @@
#include "src/TensorSymmetry/StaticSymmetry.h"
#include "src/TensorSymmetry/DynamicSymmetry.h"
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_CXX11_TENSORSYMMETRY_MODULE
@@ -12,7 +12,7 @@
#include "../../../Eigen/Core"
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
/** \defgroup CXX11_ThreadPool_Module C++11 ThreadPool Module
*
@@ -44,35 +44,31 @@
#include <thread>
#include <functional>
#include <memory>
#include "src/util/CXX11Meta.h"
#include "src/util/MaxSizeVector.h"
#include "src/ThreadPool/ThreadLocal.h"
+#ifndef EIGEN_THREAD_LOCAL
+// There are non-parenthesized calls to "max" in the <unordered_map> header,
+// which trigger a check in test/main.h causing compilation to fail.
+// We work around the check here by removing the check for max in
+// the case where we have to emulate thread_local.
+#ifdef max
+#undef max
+#endif
+#include <unordered_map>
+#endif
#include "src/ThreadPool/ThreadYield.h"
#include "src/ThreadPool/ThreadCancel.h"
#include "src/ThreadPool/EventCount.h"
#include "src/ThreadPool/RunQueue.h"
#include "src/ThreadPool/ThreadPoolInterface.h"
#include "src/ThreadPool/ThreadEnvironment.h"
-#include "src/ThreadPool/SimpleThreadPool.h"
+#include "src/ThreadPool/Barrier.h"
#include "src/ThreadPool/NonBlockingThreadPool.h"
-// Use the more efficient NonBlockingThreadPool by default.
-namespace Eigen {
-#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
-template <typename Env> using ThreadPoolTempl = NonBlockingThreadPoolTempl<Env>;
-typedef NonBlockingThreadPool ThreadPool;
-#else
-template <typename Env> using ThreadPoolTempl = SimpleThreadPoolTempl<Env>;
-typedef SimpleThreadPool ThreadPool;
-#endif
-} // namespace Eigen
#endif
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_CXX11_THREADPOOL_MODULE
-# Eigen Tensors
+# Eigen Tensors {#eigen_tensors}
Tensors are multidimensional arrays of elements. Elements are typically scalars,
but more complex types such as strings are also supported.
@@ -8,7 +8,7 @@ but more complex types such as strings are also supported.
## Tensor Classes
You can manipulate a tensor with one of the following classes. They all are in
-the namespace ```::Eigen.```
+the namespace `::Eigen`.
### Class Tensor<data_type, rank>
@@ -21,10 +21,10 @@ matrix.
Tensors of this class are resizable. For example, if you assign a tensor of a
different size to a Tensor, that tensor is resized to match its new value.
-#### Constructor Tensor<data_type, rank>(size0, size1, ...)
+#### Constructor `Tensor<data_type, rank>(size0, size1, ...)`
-Constructor for a Tensor. The constructor must be passed ```rank``` integers
-indicating the sizes of the instance along each of the the ```rank```
+Constructor for a Tensor. The constructor must be passed `rank` integers
+indicating the sizes of the instance along each of the `rank`
dimensions.
// Create a tensor of rank 3 of sizes 2, 3, 4. This tensor owns
@@ -34,18 +34,18 @@ dimensions.
// Resize t_3d by assigning a tensor of different sizes, but same rank.
t_3d = Tensor<float, 3>(3, 4, 3);
-#### Constructor Tensor<data_type, rank>(size_array)
+#### Constructor `Tensor<data_type, rank>(size_array)`
Constructor where the sizes for the constructor are specified as an array of
values instead of an explicit list of parameters. The array type to use is
-```Eigen::array<Eigen::Index>```. The array can be constructed automatically
+`Eigen::array<Eigen::Index>`. The array can be constructed automatically
from an initializer list.
// Create a tensor of strings of rank 2 with sizes 5, 7.
Tensor<string, 2> t_2d({5, 7});
-### Class TensorFixedSize<data_type, Sizes<size0, size1, ...>>
+### Class `TensorFixedSize<data_type, Sizes<size0, size1, ...>>`
Class to use for tensors of fixed size, where the size is known at compile
time. Fixed sized tensors can provide very fast computations because all their
@@ -57,7 +57,7 @@ tensor data is held onto the stack and does not cause heap allocation and free.
// Create a 4 x 3 tensor of floats.
TensorFixedSize<float, Sizes<4, 3>> t_4x3;
-### Class TensorMap<Tensor<data_type, rank>>
+### Class `TensorMap<Tensor<data_type, rank>>`
This is the class to use to create a tensor on top of memory allocated and
owned by another part of your code. It allows you to view any piece of allocated
@@ -67,7 +67,7 @@ data are stored.
A TensorMap is not resizable because it does not own the memory where its data
are stored.
-#### Constructor TensorMap<Tensor<data_type, rank>>(data, size0, size1, ...)
+#### Constructor `TensorMap<Tensor<data_type, rank>>(data, size0, size1, ...)`
Constructor for a TensorMap. The constructor must be passed a pointer to the
storage for the data, and "rank" size attributes. The storage has to be
@@ -75,28 +75,28 @@ large enough to hold all the data.
// Map a tensor of ints on top of stack-allocated storage.
int storage[128]; // 2 x 4 x 2 x 8 = 128
-TensorMap<int, 4> t_4d(storage, 2, 4, 2, 8);
+TensorMap<Tensor<int, 4>> t_4d(storage, 2, 4, 2, 8);
// The same storage can be viewed as a different tensor.
// You can also pass the sizes as an array.
-TensorMap<int, 2> t_2d(storage, 16, 8);
+TensorMap<Tensor<int, 2>> t_2d(storage, 16, 8);
// You can also map fixed-size tensors. Here we get a 1d view of
// the 2d fixed-size tensor.
-Tensor<float, Sizes<4, 5>> t_4x3;
-TensorMap<float, 1> t_12(t_4x3, 12);
+TensorFixedSize<float, Sizes<4, 3>> t_4x3;
+TensorMap<Tensor<float, 1>> t_12(t_4x3.data(), 12);
-#### Class TensorRef
+#### Class `TensorRef`
See Assigning to a TensorRef below.
## Accessing Tensor Elements
-#### <data_type> tensor(index0, index1...)
+#### `<data_type> tensor(index0, index1...)`
-Return the element at position ```(index0, index1...)``` in tensor
-```tensor```. You must pass as many parameters as the rank of ```tensor```.
+Return the element at position `(index0, index1...)` in tensor
+`tensor`. You must pass as many parameters as the rank of `tensor`.
The expression can be used as an l-value to set the value of the element at the
specified position. The value returned is of the datatype of the tensor.
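For example, a minimal sketch (assuming `#include <Eigen/CXX11/Tensor>` and the `Eigen` namespace):

    // operator() reads and writes individual elements.
    Tensor<float, 2> a(2, 3);
    a.setZero();
    a(0, 1) = 7.0f;     // used as an l-value to set an element
    float v = a(0, 1);  // returns the tensor's scalar type (float)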
@@ -121,8 +121,8 @@ specified position. The value returned is of the datatype of the tensor.
## TensorLayout
-The tensor library supports 2 layouts: ```ColMajor``` (the default) and
-```RowMajor```. Only the default column major layout is currently fully
+The tensor library supports 2 layouts: `ColMajor` (the default) and
+`RowMajor`. Only the default column major layout is currently fully
supported, and it is therefore not recommended to attempt to use the row major
layout at the moment.
@@ -136,7 +136,7 @@ All the arguments to an expression must use the same layout. Attempting to mix
different layouts will result in a compilation error.
It is possible to change the layout of a tensor or an expression using the
-```swap_layout()``` method. Note that this will also reverse the order of the
+`swap_layout()` method. Note that this will also reverse the order of the
dimensions.
Tensor<float, 2, ColMajor> col_major(2, 4);
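A sketch of how the example continues, based on the `swap_layout()` description above (the assignment target must use the opposite layout and reversed dimensions):

    Tensor<float, 2, RowMajor> row_major(4, 2);
    // swap_layout() flips ColMajor to RowMajor and reverses the order
    // of the dimensions, so the 2 x 4 tensor is seen as a 4 x 2 tensor.
    row_major = col_major.swap_layout();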
@@ -173,35 +173,35 @@ the following code computes the elementwise addition of two tensors:
Tensor<float, 3> t3 = t1 + t2;
While the code above looks easy enough, it is important to understand that the
-expression ```t1 + t2``` is not actually adding the values of the tensors. The
+expression `t1 + t2` is not actually adding the values of the tensors. The
expression instead constructs a "tensor operator" object of the class
TensorCwiseBinaryOp<scalar_sum>, which has references to the tensors
-```t1``` and ```t2```. This is a small C++ object that knows how to add
-```t1``` and ```t2```. It is only when the value of the expression is assigned
-to the tensor ```t3``` that the addition is actually performed. Technically,
-this happens through the overloading of ```operator=()``` in the Tensor class.
+`t1` and `t2`. This is a small C++ object that knows how to add
+`t1` and `t2`. It is only when the value of the expression is assigned
+to the tensor `t3` that the addition is actually performed. Technically,
+this happens through the overloading of `operator=()` in the Tensor class.
This mechanism for computing tensor expressions allows for lazy evaluation and
optimizations which are what make the tensor library very fast.
-Of course, the tensor operators do nest, and the expression ```t1 + t2 *
-0.3f``` is actually represented with the (approximate) tree of operators:
+Of course, the tensor operators do nest, and the expression `t1 + t2 * 0.3f`
+is actually represented with the (approximate) tree of operators:
TensorCwiseBinaryOp<scalar_sum>(t1, TensorCwiseUnaryOp<scalar_mul>(t2, 0.3f))
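To make the lazy evaluation concrete, a small sketch (the tensor shapes are arbitrary):

    Tensor<float, 3> t1(2, 3, 4), t2(2, 3, 4);
    t1.setRandom();
    t2.setRandom();
    auto expr = t1 + t2 * 0.3f;   // only builds the operator tree above
    Tensor<float, 3> t3 = expr;   // the arithmetic runs on this assignment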
### Tensor Operations and C++ "auto"
-Because Tensor operations create tensor operators, the C++ ```auto``` keyword
+Because Tensor operations create tensor operators, the C++ `auto` keyword
does not have its intuitive meaning. Consider these 2 lines of code:
Tensor<float, 3> t3 = t1 + t2;
auto t4 = t1 + t2;
-In the first line we allocate the tensor ```t3``` and it will contain the
-result of the addition of ```t1``` and ```t2```. In the second line, ```t4```
+In the first line we allocate the tensor `t3` and it will contain the
+result of the addition of `t1` and `t2`. In the second line, `t4`
is actually the tree of tensor operators that will compute the addition of
-```t1``` and ```t2```. In fact, ```t4``` is *not* a tensor and you cannot get
+`t1` and `t2`. In fact, `t4` is *not* a tensor and you cannot get
the values of its elements:
Tensor<float, 3> t3 = t1 + t2;
@@ -210,8 +210,8 @@ the values of its elements:
auto t4 = t1 + t2;
cout << t4(0, 0, 0); // Compilation error!
-When you use ```auto``` you do not get a Tensor as a result but instead a
-non-evaluated expression. So only use ```auto``` to delay evaluation.
+When you use `auto` you do not get a Tensor as a result but instead a
+non-evaluated expression. So only use `auto` to delay evaluation.
Unfortunately, there is no single underlying concrete type for holding
non-evaluated expressions, hence you have to use auto in the case when you do
@@ -257,9 +257,9 @@ There are several ways to control when expressions are evaluated:
#### Assigning to a Tensor, TensorFixedSize, or TensorMap.
The most common way to evaluate an expression is to assign it to a Tensor. In
-the example below, the ```auto``` declarations make the intermediate values
+the example below, the `auto` declarations make the intermediate values
"Operations", not Tensors, and do not cause the expressions to be evaluated.
-The assignment to the Tensor ```result``` causes the evaluation of all the
+The assignment to the Tensor `result` causes the evaluation of all the
operations.
auto t3 = t1 + t2; // t3 is an Operation.
@@ -272,17 +272,17 @@ Operation to a TensorFixedSize instead of a Tensor, which is a bit more
efficient.
// We know that the result is a 4x4x2 tensor!
-TensorFixedSize<float, 4, 4, 2> result = t5;
+TensorFixedSize<float, Sizes<4, 4, 2>> result = t5;
Similarly, assigning an expression to a TensorMap causes its evaluation. Like
tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to
have the rank and sizes of the expression that is assigned to them.
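A minimal sketch, assuming a caller-owned buffer large enough for a 2 x 3 x 4 result:

    float buffer[24];  // 2 x 3 x 4 = 24 floats
    TensorMap<Tensor<float, 3>> result(buffer, 2, 3, 4);
    // Assigning evaluates the expression directly into 'buffer'; the
    // map's rank and sizes must match the expression being assigned.
    result = t1 + t2;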
-#### Calling eval().
+#### Calling `eval()`.
When you compute large composite expressions, you sometimes want to tell Eigen
that an intermediate value in the expression tree is worth evaluating ahead of
-time. This is done by inserting a call to the ```eval()``` method of the
+time. This is done by inserting a call to the `eval()` method of the
expression Operation.
// The previous example could have been written:
@@ -291,15 +291,15 @@ expression Operation.
// If you want to compute (t1 + t2) once ahead of time you can write:
Tensor<float, 3> result = ((t1 + t2).eval() * 0.2f).exp();
-Semantically, calling ```eval()``` is equivalent to materializing the value of
+Semantically, calling `eval()` is equivalent to materializing the value of
the expression in a temporary Tensor of the right size. The code above in
effect does:
// .eval() knows the size!
-TensorFixedSize<float, 4, 4, 2> tmp = t1 + t2;
+TensorFixedSize<float, Sizes<4, 4, 2>> tmp = t1 + t2;
Tensor<float, 3> result = (tmp * 0.2f).exp();
-Note that the return value of ```eval()``` is itself an Operation, so the
+Note that the return value of `eval()` is itself an Operation, so the
following code does not do what you may think:
// Here t3 is an evaluation Operation. t3 has not been evaluated yet.
@@ -312,24 +312,24 @@ following code does not do what you may think:
// an intermediate tensor to represent t3.
Tensor<float, 3> result = t4;
-While in the examples above calling ```eval()``` does not make a difference in
+While in the examples above calling `eval()` does not make a difference in
performance, in other cases it can make a huge difference. In the expression
-below the ```broadcast()``` expression causes the ```X.maximum()``` expression
+below the `broadcast()` expression causes the `X.maximum()` expression
to be evaluated many times:
Tensor<...> X ...;
Tensor<...> Y = ((X - X.maximum(depth_dim).reshape(dims2d).broadcast(bcast))
* beta).exp();
-Inserting a call to ```eval()``` between the ```maximum()``` and
-```reshape()``` calls guarantees that maximum() is only computed once and
+Inserting a call to `eval()` between the `maximum()` and
+`reshape()` calls guarantees that maximum() is only computed once and
greatly speeds up execution:
Tensor<...> Y =
((X - X.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast))
* beta).exp();
-In the other example below, the tensor ```Y``` is both used in the expression
+In the other example below, the tensor `Y` is both used in the expression
and its assignment. This is an aliasing problem and if the evaluation is not
done in the right order Y will be updated incrementally during the evaluation
resulting in bogus results:
@@ -337,8 +337,8 @@ resulting in bogus results:
Tensor<...> Y ...;
Y = Y / (Y.sum(depth_dim).reshape(dims2d).broadcast(bcast));
-Inserting a call to ```eval()``` between the ```sum()``` and ```reshape()```
-expressions ensures that the sum is computed before any updates to ```Y``` are
+Inserting a call to `eval()` between the `sum()` and `reshape()`
+expressions ensures that the sum is computed before any updates to `Y` are
done.
Y = Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast));
@@ -347,21 +347,21 @@ Note that an eval around the full right hand side expression is not needed
because the generated code has to compute the i-th value of the right hand side
before assigning it to the left hand side.
-However, if you were assigning the expression value to a shuffle of ```Y```
-then you would need to force an eval for correctness by adding an ```eval()```
+However, if you were assigning the expression value to a shuffle of `Y`
+then you would need to force an eval for correctness by adding an `eval()`
call for the right hand side:
Y.shuffle(...) =
(Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast))).eval();
-#### Assigning to a TensorRef.
+#### Assigning to a `TensorRef`.
If you need to access only a few elements from the value of an expression you
can avoid materializing the value in a full tensor by using a TensorRef.
A TensorRef is a small wrapper class for any Eigen Operation. It provides
-overloads for the ```()``` operator that let you access individual values in
+overloads for the `()` operator that let you access individual values in
the expression. TensorRef is convenient, because the Operations themselves do
not provide a way to access individual elements.
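A short sketch of the pattern, with hypothetical tensors `t1` and `t2`:

    // Wrap the Operation in a TensorRef instead of assigning it to a
    // Tensor; no full result tensor is materialized.
    TensorRef<Tensor<float, 3>> ref = t1 + t2;
    float v = ref(0, 0, 0);  // evaluates only what this element needs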
@@ -390,7 +390,7 @@ such as contractions and convolutions. The implementations are optimized for
different environments: single-threaded on CPU, multi-threaded on CPU, or on a
GPU using CUDA. Additional implementations may be added later.
-You can choose which implementation to use with the ```device()``` call. If
+You can choose which implementation to use with the `device()` call. If
you do not choose an implementation explicitly the default implementation that
uses a single thread on the CPU is used.
@@ -406,7 +406,7 @@ single-threaded CPU implementation:
Tensor<float, 2> b(30, 40);
Tensor<float, 2> c = a + b;
-To choose a different implementation you have to insert a ```device()``` call
+To choose a different implementation you have to insert a `device()` call
before the assignment of the result. For technical C++ reasons this requires
that the Tensor for the result be declared on its own. This means that you
have to know the size of the result.
@@ -414,16 +414,16 @@ have to know the size of the result.
Eigen::Tensor<float, 2> c(30, 40);
c.device(...) = a + b;
-The call to ```device()``` must be the last call on the left of the operator=.
+The call to `device()` must be the last call on the left of the operator=.
-You must pass to the ```device()``` call an Eigen device object. There are
+You must pass to the `device()` call an Eigen device object. There are
presently three devices you can use: DefaultDevice, ThreadPoolDevice and
GpuDevice.
#### Evaluating With the DefaultDevice
-This is exactly the same as not inserting a ```device()``` call.
+This is exactly the same as not inserting a `device()` call.
DefaultDevice my_device;
c.device(my_device) = a + b;
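The ThreadPoolDevice variant follows the same pattern; a sketch, assuming `EIGEN_USE_THREADS` is defined before including the Tensor header (the pool size of 4 is arbitrary):

    // Create a thread pool and a device that evaluates on its threads.
    Eigen::ThreadPool pool(4);
    Eigen::ThreadPoolDevice my_device(&pool, 4 /* number of threads */);
    c.device(my_device) = a + b;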
@@ -452,24 +452,24 @@ memory for tensors with cuda.
In the documentation of the tensor methods and Operation we mention datatypes
that are tensor-type specific:
-#### <Tensor-Type>::Dimensions
+#### `<Tensor-Type>::Dimensions`
-Acts like an array of ints. Has an ```int size``` attribute, and can be
+Acts like an array of ints. Has an `int size` attribute, and can be
indexed like an array to access individual values. Used to represent the
-dimensions of a tensor. See ```dimensions()```.
+dimensions of a tensor. See `dimensions()`.
-#### <Tensor-Type>::Index
+#### `<Tensor-Type>::Index`
-Acts like an ```int```. Used for indexing tensors along their dimensions. See
-```operator()```, ```dimension()```, and ```size()```.
+Acts like an `int`. Used for indexing tensors along their dimensions. See
+`operator()`, `dimension()`, and `size()`.