Commit f9da447f authored by Aart Bik's avatar Aart Bik
Browse files

[mlir][sparse] clean up a few TODOs and layout

Removed some TODOs that are not likely to be done
or are already recorded through git tracker.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D156836
parent 1f0d24ce
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -12,4 +12,3 @@ set(LLVM_TARGET_DEFINITIONS SparseTensorTypes.td)
mlir_tablegen(SparseTensorTypes.h.inc -gen-typedef-decls)
mlir_tablegen(SparseTensorTypes.cpp.inc -gen-typedef-defs)
add_public_tablegen_target(MLIRSparseTensorTypesIncGen)
+0 −7
Original line number Diff line number Diff line
@@ -42,7 +42,6 @@ namespace sparse_tensor {
/// values with the built-in type "index".  For now, we simply assume that
/// type is 64-bit, but targets with different "index" bitwidths should
/// link with an alternatively built runtime support library.
// TODO: support such targets?
using index_type = uint64_t;

/// Encoding of overhead types (both position overhead and coordinate
@@ -92,11 +91,6 @@ enum class PrimaryType : uint32_t {
};

// This x-macro includes all `V` types.
// TODO: We currently split out the non-variadic version from the variadic
// version. Using ##__VA_ARGS__ to avoid the split gives
//   warning: token pasting of ',' and __VA_ARGS__ is a GNU extension
//   [-Wgnu-zero-variadic-macro-arguments]
// and __VA_OPT__(, ) __VA_ARGS__ requires c++20.
#define MLIR_SPARSETENSOR_FOREVERY_V(DO)                                       \
  DO(F64, double)                                                              \
  DO(F32, float)                                                               \
@@ -205,7 +199,6 @@ enum class LevelFormat : uint8_t {
/// Returns string representation of the given dimension level type.
constexpr const char *toMLIRString(DimLevelType dlt) {
  switch (dlt) {
  // TODO: should probably raise an error instead of printing it...
  case DimLevelType::Undef:
    return "undef";
  case DimLevelType::Dense:
+6 −8
Original line number Diff line number Diff line
@@ -153,9 +153,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
      stored with compression while dense storage is used within each block
      (although hybrid schemes are possible as well).

      TODO: the following example is out-of-date and will be implemented
      in a different manner than described here.
      (This will be corrected in an upcoming change that completely
      (The following will be corrected in an upcoming change that completely
      overhauls the syntax of this attribute.)

      The dimToLvl mapping also provides a notion of "counting a
@@ -439,11 +437,11 @@ def AnyRankedSparseTensor : RankedSparseTensorOf<[AnyType]>;
// Sparse Tensor Sorting Algorithm Attribute.
//===----------------------------------------------------------------------===//

// TODO: Currently, we only provide four implementations, and expose the
// implementations via attribute algorithm. In the future, if we will need
// to support both stable and non-stable quick sort, we may add
// quick_sort_nonstable enum to the attribute. Alternative, we may use two
// attributes, (stable|nonstable, algorithm), to specify a sorting
// Currently, we only provide four implementations, and expose the
// implementations via attribute algorithm. In the future, if we will
// need to support both stable and non-stable quick sort, we may add
// quick_sort_nonstable enum to the attribute. Alternatively, we may use
// two attributes, (stable|nonstable, algorithm), to specify a sorting
// implementation.
//
// --------------------------------------------------------------------------
+0 −8
Original line number Diff line number Diff line
@@ -762,14 +762,6 @@ def SparseTensor_OutOp : SparseTensor_Op<"out", []>,
//===----------------------------------------------------------------------===//

def SparseTensor_SortOp : SparseTensor_Op<"sort", [AttrSizedOperandSegments]>,
    // TODO: May want to extend tablegen with
    // class NonemptyVariadic<Type type> : Variadic<type> { let minSize = 1; }
    // and then use NonemptyVariadic<...>:$xs here.
    //
    // TODO: Currently tablegen doesn't support the assembly syntax when
    // `algorithm` is an optional enum attribute. We may want to use an optional
    // enum attribute when this is fixed in tablegen.
    //
    Arguments<(ins Index:$n,
               Variadic<StridedMemRefRankOf<[AnyInteger, Index], [1]>>:$xs,
               Variadic<StridedMemRefRankOf<[AnyType], [1]>>:$ys,
+3 −3
Original line number Diff line number Diff line
@@ -107,7 +107,7 @@ using FieldIndex = unsigned;
/// encoding.
class StorageLayout {
public:
  // TODO: Functions/methods marked with [NUMFIELDS] might should use
  // TODO: Functions/methods marked with [NUMFIELDS] should use
  // `FieldIndex` for their return type, via the same reasoning for why
  // `Dimension`/`Level` are used both for identifiers and ranks.
  explicit StorageLayout(const SparseTensorType &stt)
@@ -154,12 +154,12 @@ private:
// Wrapper functions to invoke StorageLayout-related method.
//

// TODO: See note [NUMFIELDS].
// See note [NUMFIELDS].
/// Convenience wrapper that builds a StorageLayout for `enc` and
/// reports how many storage fields the encoding requires in total.
inline unsigned getNumFieldsFromEncoding(SparseTensorEncodingAttr enc) {
  const StorageLayout layout(enc);
  return layout.getNumFields();
}

// TODO: See note [NUMFIELDS].
// See note [NUMFIELDS].
/// Convenience wrapper that builds a StorageLayout for `enc` and
/// reports how many data fields (as opposed to metadata fields) the
/// encoding requires.
inline unsigned getNumDataFieldsFromEncoding(SparseTensorEncodingAttr enc) {
  const StorageLayout layout(enc);
  return layout.getNumDataFields();
}
Loading