Posted to commits@arrow.apache.org by we...@apache.org on 2019/12/24 20:39:58 UTC

[arrow] branch master updated: ARROW-7420: [C++] Migrate tensor related APIs to Result-returning version

This is an automated email from the ASF dual-hosted git repository.

wesm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/master by this push:
     new 7b782e5  ARROW-7420: [C++] Migrate tensor related APIs to Result-returning version
7b782e5 is described below

commit 7b782e573529f53af1b88a8f2741449a2345d5f4
Author: Kenta Murata <mr...@mrkn.jp>
AuthorDate: Tue Dec 24 14:39:43 2019 -0600

    ARROW-7420: [C++] Migrate tensor related APIs to Result-returning version
    
    I'd like to modify the tensor-related APIs, both internal and public, to use `Result<T>` for their return values instead of output parameters.
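    
    For illustration, a minimal caller-side sketch of the migration, using `ReadTensor` as the example (the `LoadTensor` wrapper and its stream argument are hypothetical, not part of this patch):
    
        #include <memory>
        
        #include "arrow/io/interfaces.h"
        #include "arrow/ipc/reader.h"
        #include "arrow/result.h"
        #include "arrow/tensor.h"
        
        // Before this patch (old signature, shown for comparison):
        //   std::shared_ptr<arrow::Tensor> tensor;
        //   RETURN_NOT_OK(arrow::ipc::ReadTensor(stream, &tensor));
        //
        // After this patch the function returns Result<T>; ARROW_ASSIGN_OR_RAISE
        // unwraps the value on success and returns the error Status early on failure.
        arrow::Result<std::shared_ptr<arrow::Tensor>> LoadTensor(
            arrow::io::InputStream* stream) {
          ARROW_ASSIGN_OR_RAISE(auto tensor, arrow::ipc::ReadTensor(stream));
          return tensor;
        }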
    
    Moreover, I've fixed a bug in `SparseCSXIndex::ValidateShape` for the CSC format.
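    
    As a worked sanity check on that fix (a minimal sketch; the `CompressedAxis` enum and `IndptrLengthMatches` helper are illustrative, not Arrow APIs): the index pointer of a compressed sparse matrix needs one entry per row plus one for CSR, but one entry per column plus one for CSC, so the validation must compare against the compressed axis rather than always against `shape[0]`.
    
        #include <cstdint>
        #include <vector>
        
        enum class CompressedAxis : int64_t { kRow = 0, kColumn = 1 };
        
        // CSR compresses rows:    indptr length must be shape[0] + 1.
        // CSC compresses columns: indptr length must be shape[1] + 1.
        bool IndptrLengthMatches(const std::vector<int64_t>& shape,
                                 int64_t indptr_length, CompressedAxis axis) {
          return indptr_length == shape[static_cast<int64_t>(axis)] + 1;
        }
        
        // For a 3 x 4 matrix:
        //   IndptrLengthMatches({3, 4}, 4, CompressedAxis::kRow)    -> true  (CSR)
        //   IndptrLengthMatches({3, 4}, 5, CompressedAxis::kColumn) -> true  (CSC)
        // The previous check always used shape[0] + 1, so it mis-validated CSC
        // matrices whenever the row and column counts differ.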
    
    Closes #6070 from mrkn/ARROW-7420 and squashes the following commits:
    
    ac5d402f0 <Kenta Murata> Fix pyarrow
    7fa4642eb <Kenta Murata> Fix arrow-glib
    5b3b3ee54 <Kenta Murata> Let tensor related APIs use Result<T> for return types
    ee20671d1 <Kenta Murata> Fix SparseCSXIndex::ValidateShape for CSC format
    74a76507b <Kenta Murata> Fix the validation of tensor shape elements
    
    Authored-by: Kenta Murata <mr...@mrkn.jp>
    Signed-off-by: Wes McKinney <we...@apache.org>
---
 c_glib/arrow-glib/input-stream.cpp     |  8 ++--
 cpp/src/arrow/ipc/metadata_internal.cc | 32 ++++++++------
 cpp/src/arrow/ipc/metadata_internal.h  | 17 ++++----
 cpp/src/arrow/ipc/read_write_test.cc   |  8 ++--
 cpp/src/arrow/ipc/reader.cc            | 80 +++++++++++++++-------------------
 cpp/src/arrow/ipc/reader.h             | 31 +++++--------
 cpp/src/arrow/ipc/writer.cc            |  8 ++--
 cpp/src/arrow/python/deserialize.cc    | 18 ++++----
 cpp/src/arrow/sparse_tensor.cc         |  2 +-
 cpp/src/arrow/sparse_tensor.h          |  2 +-
 cpp/src/arrow/tensor.cc                |  2 +-
 python/pyarrow/includes/libarrow.pxd   |  2 +-
 python/pyarrow/ipc.pxi                 |  2 +-
 13 files changed, 98 insertions(+), 114 deletions(-)

diff --git a/c_glib/arrow-glib/input-stream.cpp b/c_glib/arrow-glib/input-stream.cpp
index f464061..8b1bbe0 100644
--- a/c_glib/arrow-glib/input-stream.cpp
+++ b/c_glib/arrow-glib/input-stream.cpp
@@ -290,11 +290,9 @@ garrow_input_stream_read_tensor(GArrowInputStream *input_stream,
 {
   auto arrow_input_stream = garrow_input_stream_get_raw(input_stream);
 
-  std::shared_ptr<arrow::Tensor> arrow_tensor;
-  auto status = arrow::ipc::ReadTensor(arrow_input_stream.get(),
-                                       &arrow_tensor);
-  if (garrow_error_check(error, status, "[input-stream][read-tensor]")) {
-    return garrow_tensor_new_raw(&arrow_tensor);
+  auto arrow_tensor = arrow::ipc::ReadTensor(arrow_input_stream.get());
+  if (garrow::check(error, arrow_tensor, "[input-stream][read-tensor]")) {
+    return garrow_tensor_new_raw(&(arrow_tensor.ValueOrDie()));
   } else {
     return NULL;
   }
diff --git a/cpp/src/arrow/ipc/metadata_internal.cc b/cpp/src/arrow/ipc/metadata_internal.cc
index 5ddaa5e..55cc8b7 100644
--- a/cpp/src/arrow/ipc/metadata_internal.cc
+++ b/cpp/src/arrow/ipc/metadata_internal.cc
@@ -848,13 +848,14 @@ Status SchemaToFlatbuffer(FBB& fbb, const Schema& schema, DictionaryMemo* dictio
   return Status::OK();
 }
 
-Status WriteFBMessage(FBB& fbb, flatbuf::MessageHeader header_type,
-                      flatbuffers::Offset<void> header, int64_t body_length,
-                      std::shared_ptr<Buffer>* out) {
+Result<std::shared_ptr<Buffer>> WriteFBMessage(FBB& fbb,
+                                               flatbuf::MessageHeader header_type,
+                                               flatbuffers::Offset<void> header,
+                                               int64_t body_length) {
   auto message = flatbuf::CreateMessage(fbb, kCurrentMetadataVersion, header_type, header,
                                         body_length);
   fbb.Finish(message);
-  return WriteFlatbufferBuilder(fbb, out);
+  return WriteFlatbufferBuilder(fbb);
 }
 
 using FieldNodeVector =
@@ -1055,7 +1056,8 @@ Status WriteSchemaMessage(const Schema& schema, DictionaryMemo* dictionary_memo,
   FBB fbb;
   flatbuffers::Offset<flatbuf::Schema> fb_schema;
   RETURN_NOT_OK(SchemaToFlatbuffer(fbb, schema, dictionary_memo, &fb_schema));
-  return WriteFBMessage(fbb, flatbuf::MessageHeader_Schema, fb_schema.Union(), 0, out);
+  return WriteFBMessage(fbb, flatbuf::MessageHeader_Schema, fb_schema.Union(), 0)
+      .Value(out);
 }
 
 Status WriteRecordBatchMessage(int64_t length, int64_t body_length,
@@ -1066,11 +1068,12 @@ Status WriteRecordBatchMessage(int64_t length, int64_t body_length,
   RecordBatchOffset record_batch;
   RETURN_NOT_OK(MakeRecordBatch(fbb, length, body_length, nodes, buffers, &record_batch));
   return WriteFBMessage(fbb, flatbuf::MessageHeader_RecordBatch, record_batch.Union(),
-                        body_length, out);
+                        body_length)
+      .Value(out);
 }
 
-Status WriteTensorMessage(const Tensor& tensor, int64_t buffer_start_offset,
-                          std::shared_ptr<Buffer>* out) {
+Result<std::shared_ptr<Buffer>> WriteTensorMessage(const Tensor& tensor,
+                                                   int64_t buffer_start_offset) {
   using TensorDimOffset = flatbuffers::Offset<flatbuf::TensorDim>;
   using TensorOffset = flatbuffers::Offset<flatbuf::Tensor>;
 
@@ -1101,18 +1104,18 @@ Status WriteTensorMessage(const Tensor& tensor, int64_t buffer_start_offset,
       flatbuf::CreateTensor(fbb, fb_type_type, fb_type, fb_shape, fb_strides, &buffer);
 
   return WriteFBMessage(fbb, flatbuf::MessageHeader_Tensor, fb_tensor.Union(),
-                        body_length, out);
+                        body_length);
 }
 
-Status WriteSparseTensorMessage(const SparseTensor& sparse_tensor, int64_t body_length,
-                                const std::vector<BufferMetadata>& buffers,
-                                std::shared_ptr<Buffer>* out) {
+Result<std::shared_ptr<Buffer>> WriteSparseTensorMessage(
+    const SparseTensor& sparse_tensor, int64_t body_length,
+    const std::vector<BufferMetadata>& buffers) {
   FBB fbb;
   SparseTensorOffset fb_sparse_tensor;
   RETURN_NOT_OK(
       MakeSparseTensor(fbb, sparse_tensor, body_length, buffers, &fb_sparse_tensor));
   return WriteFBMessage(fbb, flatbuf::MessageHeader_SparseTensor,
-                        fb_sparse_tensor.Union(), body_length, out);
+                        fb_sparse_tensor.Union(), body_length);
 }
 
 Status WriteDictionaryMessage(int64_t id, int64_t length, int64_t body_length,
@@ -1124,7 +1127,8 @@ Status WriteDictionaryMessage(int64_t id, int64_t length, int64_t body_length,
   RETURN_NOT_OK(MakeRecordBatch(fbb, length, body_length, nodes, buffers, &record_batch));
   auto dictionary_batch = flatbuf::CreateDictionaryBatch(fbb, id, record_batch).Union();
   return WriteFBMessage(fbb, flatbuf::MessageHeader_DictionaryBatch, dictionary_batch,
-                        body_length, out);
+                        body_length)
+      .Value(out);
 }
 
 static flatbuffers::Offset<flatbuffers::Vector<const flatbuf::Block*>>
diff --git a/cpp/src/arrow/ipc/metadata_internal.h b/cpp/src/arrow/ipc/metadata_internal.h
index 5d7aba7..f7e09af 100644
--- a/cpp/src/arrow/ipc/metadata_internal.h
+++ b/cpp/src/arrow/ipc/metadata_internal.h
@@ -147,12 +147,12 @@ Status WriteRecordBatchMessage(const int64_t length, const int64_t body_length,
                                const std::vector<BufferMetadata>& buffers,
                                std::shared_ptr<Buffer>* out);
 
-Status WriteTensorMessage(const Tensor& tensor, const int64_t buffer_start_offset,
-                          std::shared_ptr<Buffer>* out);
+Result<std::shared_ptr<Buffer>> WriteTensorMessage(const Tensor& tensor,
+                                                   const int64_t buffer_start_offset);
 
-Status WriteSparseTensorMessage(const SparseTensor& sparse_tensor, int64_t body_length,
-                                const std::vector<BufferMetadata>& buffers,
-                                std::shared_ptr<Buffer>* out);
+Result<std::shared_ptr<Buffer>> WriteSparseTensorMessage(
+    const SparseTensor& sparse_tensor, int64_t body_length,
+    const std::vector<BufferMetadata>& buffers);
 
 Status WriteFileFooter(const Schema& schema, const std::vector<FileBlock>& dictionaries,
                        const std::vector<FileBlock>& record_batches,
@@ -164,8 +164,8 @@ Status WriteDictionaryMessage(const int64_t id, const int64_t length,
                               const std::vector<BufferMetadata>& buffers,
                               std::shared_ptr<Buffer>* out);
 
-static inline Status WriteFlatbufferBuilder(flatbuffers::FlatBufferBuilder& fbb,
-                                            std::shared_ptr<Buffer>* out) {
+static inline Result<std::shared_ptr<Buffer>> WriteFlatbufferBuilder(
+    flatbuffers::FlatBufferBuilder& fbb) {
   int32_t size = fbb.GetSize();
 
   std::shared_ptr<Buffer> result;
@@ -173,8 +173,7 @@ static inline Status WriteFlatbufferBuilder(flatbuffers::FlatBufferBuilder& fbb,
 
   uint8_t* dst = result->mutable_data();
   memcpy(dst, fbb.GetBufferPointer(), size);
-  *out = result;
-  return Status::OK();
+  return result;
 }
 
 }  // namespace internal
diff --git a/cpp/src/arrow/ipc/read_write_test.cc b/cpp/src/arrow/ipc/read_write_test.cc
index 30651fd..b1b2466 100644
--- a/cpp/src/arrow/ipc/read_write_test.cc
+++ b/cpp/src/arrow/ipc/read_write_test.cc
@@ -95,7 +95,7 @@ TEST(TestMessage, SerializeTo) {
                                     body_length));
 
   std::shared_ptr<Buffer> metadata;
-  ASSERT_OK(internal::WriteFlatbufferBuilder(fbb, &metadata));
+  ASSERT_OK_AND_ASSIGN(metadata, internal::WriteFlatbufferBuilder(fbb));
 
   std::string body = "abcdef";
 
@@ -1081,7 +1081,7 @@ class TestTensorRoundTrip : public ::testing::Test, public IpcTestFixture {
     ASSERT_OK(mmap_->Seek(0));
 
     std::shared_ptr<Tensor> result;
-    ASSERT_OK(ReadTensor(mmap_.get(), &result));
+    ASSERT_OK_AND_ASSIGN(result, ReadTensor(mmap_.get()));
 
     ASSERT_EQ(result->data()->size(), expected_body_length);
     ASSERT_TRUE(tensor.Equals(*result));
@@ -1167,7 +1167,7 @@ class TestSparseTensorRoundTrip : public ::testing::Test, public IpcTestFixture
     ASSERT_OK(mmap_->Seek(0));
 
     std::shared_ptr<SparseTensor> result;
-    ASSERT_OK(ReadSparseTensor(mmap_.get(), &result));
+    ASSERT_OK_AND_ASSIGN(result, ReadSparseTensor(mmap_.get()));
     ASSERT_EQ(SparseTensorFormat::COO, result->format_id());
 
     const auto& resulted_sparse_index =
@@ -1210,7 +1210,7 @@ class TestSparseTensorRoundTrip : public ::testing::Test, public IpcTestFixture
     ASSERT_OK(mmap_->Seek(0));
 
     std::shared_ptr<SparseTensor> result;
-    ASSERT_OK(ReadSparseTensor(mmap_.get(), &result));
+    ASSERT_OK_AND_ASSIGN(result, ReadSparseTensor(mmap_.get()));
 
     constexpr auto expected_format_id =
         std::is_same<SparseIndexType, SparseCSRIndex>::value ? SparseTensorFormat::CSR
diff --git a/cpp/src/arrow/ipc/reader.cc b/cpp/src/arrow/ipc/reader.cc
index f941116..7b81c73 100644
--- a/cpp/src/arrow/ipc/reader.cc
+++ b/cpp/src/arrow/ipc/reader.cc
@@ -840,21 +840,20 @@ Status ReadRecordBatch(const std::shared_ptr<Schema>& schema,
                          &buffer_reader, out);
 }
 
-Status ReadTensor(io::InputStream* file, std::shared_ptr<Tensor>* out) {
+Result<std::shared_ptr<Tensor>> ReadTensor(io::InputStream* file) {
   std::unique_ptr<Message> message;
   RETURN_NOT_OK(ReadContiguousPayload(file, &message));
-  return ReadTensor(*message, out);
+  return ReadTensor(*message);
 }
 
-Status ReadTensor(const Message& message, std::shared_ptr<Tensor>* out) {
+Result<std::shared_ptr<Tensor>> ReadTensor(const Message& message) {
   std::shared_ptr<DataType> type;
   std::vector<int64_t> shape;
   std::vector<int64_t> strides;
   std::vector<std::string> dim_names;
   RETURN_NOT_OK(internal::GetTensorMetadata(*message.metadata(), &type, &shape, &strides,
                                             &dim_names));
-  *out = std::make_shared<Tensor>(type, message.body(), shape, strides, dim_names);
-  return Status::OK();
+  return Tensor::Make(type, message.body(), shape, strides, dim_names);
 }
 
 namespace {
@@ -940,31 +939,28 @@ Result<std::shared_ptr<SparseIndex>> ReadSparseCSXIndex(
   }
 }
 
-Status MakeSparseTensorWithSparseCOOIndex(
+Result<std::shared_ptr<SparseTensor>> MakeSparseTensorWithSparseCOOIndex(
     const std::shared_ptr<DataType>& type, const std::vector<int64_t>& shape,
     const std::vector<std::string>& dim_names,
     const std::shared_ptr<SparseCOOIndex>& sparse_index, int64_t non_zero_length,
-    const std::shared_ptr<Buffer>& data, std::shared_ptr<SparseTensor>* out) {
-  *out = std::make_shared<SparseCOOTensor>(sparse_index, type, data, shape, dim_names);
-  return Status::OK();
+    const std::shared_ptr<Buffer>& data) {
+  return SparseCOOTensor::Make(sparse_index, type, data, shape, dim_names);
 }
 
-Status MakeSparseTensorWithSparseCSRIndex(
+Result<std::shared_ptr<SparseTensor>> MakeSparseTensorWithSparseCSRIndex(
     const std::shared_ptr<DataType>& type, const std::vector<int64_t>& shape,
     const std::vector<std::string>& dim_names,
     const std::shared_ptr<SparseCSRIndex>& sparse_index, int64_t non_zero_length,
-    const std::shared_ptr<Buffer>& data, std::shared_ptr<SparseTensor>* out) {
-  *out = std::make_shared<SparseCSRMatrix>(sparse_index, type, data, shape, dim_names);
-  return Status::OK();
+    const std::shared_ptr<Buffer>& data) {
+  return SparseCSRMatrix::Make(sparse_index, type, data, shape, dim_names);
 }
 
-Status MakeSparseTensorWithSparseCSCIndex(
+Result<std::shared_ptr<SparseTensor>> MakeSparseTensorWithSparseCSCIndex(
     const std::shared_ptr<DataType>& type, const std::vector<int64_t>& shape,
     const std::vector<std::string>& dim_names,
     const std::shared_ptr<SparseCSCIndex>& sparse_index, int64_t non_zero_length,
-    const std::shared_ptr<Buffer>& data, std::shared_ptr<SparseTensor>* out) {
-  *out = std::make_shared<SparseCSCMatrix>(sparse_index, type, data, shape, dim_names);
-  return Status::OK();
+    const std::shared_ptr<Buffer>& data) {
+  return SparseCSCMatrix::Make(sparse_index, type, data, shape, dim_names);
 }
 
 Status ReadSparseTensorMetadata(const Buffer& metadata,
@@ -1005,30 +1001,24 @@ namespace internal {
 
 namespace {
 
-Status GetSparseTensorBodyBufferCount(SparseTensorFormat::type format_id,
-                                      size_t* buffer_count) {
+Result<size_t> GetSparseTensorBodyBufferCount(SparseTensorFormat::type format_id) {
   switch (format_id) {
     case SparseTensorFormat::COO:
-      *buffer_count = 2;
-      break;
+      return 2;
 
     case SparseTensorFormat::CSR:
-      *buffer_count = 3;
-      break;
+      return 3;
 
     default:
       return Status::Invalid("Unrecognized sparse tensor format");
   }
-
-  return Status::OK();
 }
 
 Status CheckSparseTensorBodyBufferCount(
     const IpcPayload& payload, SparseTensorFormat::type sparse_tensor_format_id) {
   size_t expected_body_buffer_count = 0;
-
-  RETURN_NOT_OK(GetSparseTensorBodyBufferCount(sparse_tensor_format_id,
-                                               &expected_body_buffer_count));
+  ARROW_ASSIGN_OR_RAISE(expected_body_buffer_count,
+                        GetSparseTensorBodyBufferCount(sparse_tensor_format_id));
   if (payload.body_buffers.size() != expected_body_buffer_count) {
     return Status::Invalid("Invalid body buffer count for a sparse tensor");
   }
@@ -1038,16 +1028,15 @@ Status CheckSparseTensorBodyBufferCount(
 
 }  // namespace
 
-Status ReadSparseTensorBodyBufferCount(const Buffer& metadata, size_t* buffer_count) {
+Result<size_t> ReadSparseTensorBodyBufferCount(const Buffer& metadata) {
   SparseTensorFormat::type format_id;
 
   RETURN_NOT_OK(internal::GetSparseTensorMetadata(metadata, nullptr, nullptr, nullptr,
                                                   nullptr, &format_id));
-  return GetSparseTensorBodyBufferCount(format_id, buffer_count);
+  return GetSparseTensorBodyBufferCount(format_id);
 }
 
-Status ReadSparseTensorPayload(const IpcPayload& payload,
-                               std::shared_ptr<SparseTensor>* out) {
+Result<std::shared_ptr<SparseTensor>> ReadSparseTensorPayload(const IpcPayload& payload) {
   std::shared_ptr<DataType> type;
   std::vector<int64_t> shape;
   std::vector<std::string> dim_names;
@@ -1072,8 +1061,7 @@ Status ReadSparseTensorPayload(const IpcPayload& payload,
                             SparseCOOIndex::Make(indices_type, shape, non_zero_length,
                                                  payload.body_buffers[0]));
       return MakeSparseTensorWithSparseCOOIndex(type, shape, dim_names, sparse_index,
-                                                non_zero_length, payload.body_buffers[1],
-                                                out);
+                                                non_zero_length, payload.body_buffers[1]);
     }
     case SparseTensorFormat::CSR: {
       std::shared_ptr<SparseCSRIndex> sparse_index;
@@ -1088,8 +1076,10 @@ Status ReadSparseTensorPayload(const IpcPayload& payload,
           SparseCSRIndex::Make(indices_type, shape, non_zero_length,
                                payload.body_buffers[0], payload.body_buffers[1]));
       return MakeSparseTensorWithSparseCSRIndex(type, shape, dim_names, sparse_index,
-                                                non_zero_length, payload.body_buffers[2],
-                                                out);
+                                                non_zero_length, payload.body_buffers[2]);
+    }
+    case SparseTensorFormat::CSC: {
+      return Status::NotImplemented("TODO: CSC support");
     }
     default:
       return Status::Invalid("Unsupported sparse index format");
@@ -1098,8 +1088,8 @@ Status ReadSparseTensorPayload(const IpcPayload& payload,
 
 }  // namespace internal
 
-Status ReadSparseTensor(const Buffer& metadata, io::RandomAccessFile* file,
-                        std::shared_ptr<SparseTensor>* out) {
+Result<std::shared_ptr<SparseTensor>> ReadSparseTensor(const Buffer& metadata,
+                                                       io::RandomAccessFile* file) {
   std::shared_ptr<DataType> type;
   std::vector<int64_t> shape;
   std::vector<std::string> dim_names;
@@ -1121,39 +1111,39 @@ Status ReadSparseTensor(const Buffer& metadata, io::RandomAccessFile* file,
           sparse_index, ReadSparseCOOIndex(sparse_tensor, shape, non_zero_length, file));
       return MakeSparseTensorWithSparseCOOIndex(
           type, shape, dim_names, checked_pointer_cast<SparseCOOIndex>(sparse_index),
-          non_zero_length, data, out);
+          non_zero_length, data);
     }
     case SparseTensorFormat::CSR: {
       ARROW_ASSIGN_OR_RAISE(
           sparse_index, ReadSparseCSXIndex(sparse_tensor, shape, non_zero_length, file));
       return MakeSparseTensorWithSparseCSRIndex(
           type, shape, dim_names, checked_pointer_cast<SparseCSRIndex>(sparse_index),
-          non_zero_length, data, out);
+          non_zero_length, data);
     }
     case SparseTensorFormat::CSC: {
       ARROW_ASSIGN_OR_RAISE(
           sparse_index, ReadSparseCSXIndex(sparse_tensor, shape, non_zero_length, file));
       return MakeSparseTensorWithSparseCSCIndex(
           type, shape, dim_names, checked_pointer_cast<SparseCSCIndex>(sparse_index),
-          non_zero_length, data, out);
+          non_zero_length, data);
     }
     default:
       return Status::Invalid("Unsupported sparse index format");
   }
 }
 
-Status ReadSparseTensor(const Message& message, std::shared_ptr<SparseTensor>* out) {
+Result<std::shared_ptr<SparseTensor>> ReadSparseTensor(const Message& message) {
   io::BufferReader buffer_reader(message.body());
-  return ReadSparseTensor(*message.metadata(), &buffer_reader, out);
+  return ReadSparseTensor(*message.metadata(), &buffer_reader);
 }
 
-Status ReadSparseTensor(io::InputStream* file, std::shared_ptr<SparseTensor>* out) {
+Result<std::shared_ptr<SparseTensor>> ReadSparseTensor(io::InputStream* file) {
   std::unique_ptr<Message> message;
   RETURN_NOT_OK(ReadContiguousPayload(file, &message));
   CHECK_MESSAGE_TYPE(Message::SPARSE_TENSOR, message->type());
   CHECK_HAS_BODY(*message);
   io::BufferReader buffer_reader(message->body());
-  return ReadSparseTensor(*message->metadata(), &buffer_reader, out);
+  return ReadSparseTensor(*message->metadata(), &buffer_reader);
 }
 
 }  // namespace ipc
diff --git a/cpp/src/arrow/ipc/reader.h b/cpp/src/arrow/ipc/reader.h
index 33634d9..3425101 100644
--- a/cpp/src/arrow/ipc/reader.h
+++ b/cpp/src/arrow/ipc/reader.h
@@ -259,34 +259,30 @@ Status ReadRecordBatch(const Buffer& metadata, const std::shared_ptr<Schema>& sc
 /// \brief Read arrow::Tensor as encapsulated IPC message in file
 ///
 /// \param[in] file an InputStream pointed at the start of the message
-/// \param[out] out the read tensor
-/// \return Status
+/// \return the read tensor
 ARROW_EXPORT
-Status ReadTensor(io::InputStream* file, std::shared_ptr<Tensor>* out);
+Result<std::shared_ptr<Tensor>> ReadTensor(io::InputStream* file);
 
 /// \brief EXPERIMENTAL: Read arrow::Tensor from IPC message
 ///
 /// \param[in] message a Message containing the tensor metadata and body
-/// \param[out] out the read tensor
-/// \return Status
+/// \return the read tensor
 ARROW_EXPORT
-Status ReadTensor(const Message& message, std::shared_ptr<Tensor>* out);
+Result<std::shared_ptr<Tensor>> ReadTensor(const Message& message);
 
 /// \brief EXPERIMETNAL: Read arrow::SparseTensor as encapsulated IPC message in file
 ///
 /// \param[in] file an InputStream pointed at the start of the message
-/// \param[out] out the read sparse tensor
-/// \return Status
+/// \return the read sparse tensor
 ARROW_EXPORT
-Status ReadSparseTensor(io::InputStream* file, std::shared_ptr<SparseTensor>* out);
+Result<std::shared_ptr<SparseTensor>> ReadSparseTensor(io::InputStream* file);
 
 /// \brief EXPERIMENTAL: Read arrow::SparseTensor from IPC message
 ///
 /// \param[in] message a Message containing the tensor metadata and body
-/// \param[out] out the read sparse tensor
-/// \return Status
+/// \return the read sparse tensor
 ARROW_EXPORT
-Status ReadSparseTensor(const Message& message, std::shared_ptr<SparseTensor>* out);
+Result<std::shared_ptr<SparseTensor>> ReadSparseTensor(const Message& message);
 
 namespace internal {
 
@@ -294,18 +290,15 @@ namespace internal {
 
 /// \brief EXPERIMENTAL: Read arrow::SparseTensorFormat::type from a metadata
 /// \param[in] metadata a Buffer containing the sparse tensor metadata
-/// \param[out] buffer_count the returned count of the body buffers
-/// \return Status
+/// \return the count of the body buffers
 ARROW_EXPORT
-Status ReadSparseTensorBodyBufferCount(const Buffer& metadata, size_t* buffer_count);
+Result<size_t> ReadSparseTensorBodyBufferCount(const Buffer& metadata);
 
 /// \brief EXPERIMENTAL: Read arrow::SparseTensor from an IpcPayload
 /// \param[in] payload a IpcPayload contains a serialized SparseTensor
-/// \param[out] out the returned SparseTensor
-/// \return Status
+/// \return the read sparse tensor
 ARROW_EXPORT
-Status ReadSparseTensorPayload(const IpcPayload& payload,
-                               std::shared_ptr<SparseTensor>* out);
+Result<std::shared_ptr<SparseTensor>> ReadSparseTensorPayload(const IpcPayload& payload);
 
 }  // namespace internal
 
diff --git a/cpp/src/arrow/ipc/writer.cc b/cpp/src/arrow/ipc/writer.cc
index 88ba56f..971b9c6 100644
--- a/cpp/src/arrow/ipc/writer.cc
+++ b/cpp/src/arrow/ipc/writer.cc
@@ -625,7 +625,7 @@ namespace {
 Status WriteTensorHeader(const Tensor& tensor, io::OutputStream* dst,
                          int32_t* metadata_length) {
   std::shared_ptr<Buffer> metadata;
-  RETURN_NOT_OK(internal::WriteTensorMessage(tensor, 0, &metadata));
+  ARROW_ASSIGN_OR_RAISE(metadata, internal::WriteTensorMessage(tensor, 0));
   IpcOptions options;
   options.alignment = kTensorAlignment;
   return WriteMessage(*metadata, options, dst, metadata_length);
@@ -720,7 +720,7 @@ Status GetTensorMessage(const Tensor& tensor, MemoryPool* pool,
   }
 
   std::shared_ptr<Buffer> metadata;
-  RETURN_NOT_OK(internal::WriteTensorMessage(*tensor_to_write, 0, &metadata));
+  ARROW_ASSIGN_OR_RAISE(metadata, internal::WriteTensorMessage(*tensor_to_write, 0));
   out->reset(new Message(metadata, tensor_to_write->data()));
   return Status::OK();
 }
@@ -761,8 +761,8 @@ class SparseTensorSerializer {
   }
 
   Status SerializeMetadata(const SparseTensor& sparse_tensor) {
-    return WriteSparseTensorMessage(sparse_tensor, out_->body_length, buffer_meta_,
-                                    &out_->metadata);
+    return WriteSparseTensorMessage(sparse_tensor, out_->body_length, buffer_meta_)
+        .Value(&out_->metadata);
   }
 
   Status Assemble(const SparseTensor& sparse_tensor) {
diff --git a/cpp/src/arrow/python/deserialize.cc b/cpp/src/arrow/python/deserialize.cc
index 82a3e84..c4e420c 100644
--- a/cpp/src/arrow/python/deserialize.cc
+++ b/cpp/src/arrow/python/deserialize.cc
@@ -327,21 +327,21 @@ Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out)
 
   for (int i = 0; i < num_tensors; ++i) {
     std::shared_ptr<Tensor> tensor;
-    RETURN_NOT_OK(ipc::ReadTensor(src, &tensor));
+    ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(src));
     RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
     out->tensors.push_back(tensor);
   }
 
   for (int i = 0; i < num_sparse_tensors; ++i) {
     std::shared_ptr<SparseTensor> sparse_tensor;
-    RETURN_NOT_OK(ipc::ReadSparseTensor(src, &sparse_tensor));
+    ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::ReadSparseTensor(src));
     RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
     out->sparse_tensors.push_back(sparse_tensor);
   }
 
   for (int i = 0; i < num_ndarrays; ++i) {
     std::shared_ptr<Tensor> ndarray;
-    RETURN_NOT_OK(ipc::ReadTensor(src, &ndarray));
+    ARROW_ASSIGN_OR_RAISE(ndarray, ipc::ReadTensor(src));
     RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
     out->ndarrays.push_back(ndarray);
   }
@@ -411,7 +411,7 @@ Status GetSerializedFromComponents(int num_tensors,
 
     ipc::Message message(metadata, body);
 
-    RETURN_NOT_OK(ipc::ReadTensor(message, &tensor));
+    ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
     out->tensors.emplace_back(std::move(tensor));
   }
 
@@ -420,9 +420,9 @@ Status GetSerializedFromComponents(int num_tensors,
     ipc::internal::IpcPayload payload;
     RETURN_NOT_OK(GetBuffer(buffer_index++, &payload.metadata));
 
-    size_t num_bodies;
-    RETURN_NOT_OK(
-        ipc::internal::ReadSparseTensorBodyBufferCount(*payload.metadata, &num_bodies));
+    ARROW_ASSIGN_OR_RAISE(
+        size_t num_bodies,
+        ipc::internal::ReadSparseTensorBodyBufferCount(*payload.metadata));
 
     payload.body_buffers.reserve(num_bodies);
     for (size_t i = 0; i < num_bodies; ++i) {
@@ -432,7 +432,7 @@ Status GetSerializedFromComponents(int num_tensors,
     }
 
     std::shared_ptr<SparseTensor> sparse_tensor;
-    RETURN_NOT_OK(ipc::internal::ReadSparseTensorPayload(payload, &sparse_tensor));
+    ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::internal::ReadSparseTensorPayload(payload));
     out->sparse_tensors.emplace_back(std::move(sparse_tensor));
   }
 
@@ -446,7 +446,7 @@ Status GetSerializedFromComponents(int num_tensors,
 
     ipc::Message message(metadata, body);
 
-    RETURN_NOT_OK(ipc::ReadTensor(message, &tensor));
+    ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
     out->ndarrays.emplace_back(std::move(tensor));
   }
 
diff --git a/cpp/src/arrow/sparse_tensor.cc b/cpp/src/arrow/sparse_tensor.cc
index 421287f..2c976ed 100644
--- a/cpp/src/arrow/sparse_tensor.cc
+++ b/cpp/src/arrow/sparse_tensor.cc
@@ -33,7 +33,7 @@ namespace arrow {
 // SparseIndex
 
 Status SparseIndex::ValidateShape(const std::vector<int64_t>& shape) const {
-  if (!std::all_of(shape.begin(), shape.end(), [](int64_t x) { return x > 0; })) {
+  if (!std::all_of(shape.begin(), shape.end(), [](int64_t x) { return x >= 0; })) {
     return Status::Invalid("Shape elements must be positive");
   }
 
diff --git a/cpp/src/arrow/sparse_tensor.h b/cpp/src/arrow/sparse_tensor.h
index fe0d192..ea3b65d 100644
--- a/cpp/src/arrow/sparse_tensor.h
+++ b/cpp/src/arrow/sparse_tensor.h
@@ -251,7 +251,7 @@ class SparseCSXIndex : public SparseIndexBase<SparseIndexType> {
       return Status::Invalid("shape length is too long");
     }
 
-    if (indptr_->shape()[0] == shape[0] + 1) {
+    if (indptr_->shape()[0] == shape[static_cast<int64_t>(kCompressedAxis)] + 1) {
       return Status::OK();
     }
 
diff --git a/cpp/src/arrow/tensor.cc b/cpp/src/arrow/tensor.cc
index 8a924ba..a804507 100644
--- a/cpp/src/arrow/tensor.cc
+++ b/cpp/src/arrow/tensor.cc
@@ -105,7 +105,7 @@ inline Status CheckTensorValidity(const std::shared_ptr<DataType>& type,
   if (!data) {
     return Status::Invalid("Null data is supplied");
   }
-  if (!std::all_of(shape.begin(), shape.end(), [](int64_t x) { return x > 0; })) {
+  if (!std::all_of(shape.begin(), shape.end(), [](int64_t x) { return x >= 0; })) {
     return Status::Invalid("Shape elements must be positive");
   }
   return Status::OK();
diff --git a/python/pyarrow/includes/libarrow.pxd b/python/pyarrow/includes/libarrow.pxd
index dc1c27c..77ef487 100644
--- a/python/pyarrow/includes/libarrow.pxd
+++ b/python/pyarrow/includes/libarrow.pxd
@@ -1211,7 +1211,7 @@ cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil:
                         int32_t* metadata_length,
                         int64_t* body_length)
 
-    CStatus ReadTensor(CInputStream* stream, shared_ptr[CTensor]* out)
+    CResult[shared_ptr[CTensor]] ReadTensor(CInputStream* stream)
 
     CStatus ReadRecordBatch(const CMessage& message,
                             const shared_ptr[CSchema]& schema,
diff --git a/python/pyarrow/ipc.pxi b/python/pyarrow/ipc.pxi
index 95c7e26..4d1984c 100644
--- a/python/pyarrow/ipc.pxi
+++ b/python/pyarrow/ipc.pxi
@@ -529,7 +529,7 @@ def read_tensor(source):
 
     c_stream = nf.get_input_stream().get()
     with nogil:
-        check_status(ReadTensor(c_stream, &sp_tensor))
+        sp_tensor = GetResultValue(ReadTensor(c_stream))
     return pyarrow_wrap_tensor(sp_tensor)