Posted to commits@avro.apache.org by th...@apache.org on 2021/03/22 09:35:21 UTC

[avro] branch master updated: Avro 3089 fix cpp warnings (#1149)

This is an automated email from the ASF dual-hosted git repository.

thiru pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/avro.git


The following commit(s) were added to refs/heads/master by this push:
     new 338db27  Avro 3089 fix cpp warnings (#1149)
338db27 is described below

commit 338db27462cbb442a60033f99fde7d92f863b28a
Author: Thiruvalluvan M G <th...@apache.org>
AuthorDate: Mon Mar 22 15:05:09 2021 +0530

    Avro 3089 fix cpp warnings (#1149)
    
    * Fixed the error that caused C++ lint to fail
    
    * Fixed C++ warnings in automated builds
    
    * Addressed review comments
    
    Co-authored-by: Thiruvalluvan M G <th...@startsmartlabs.com>
---
 lang/c++/impl/NodeImpl.cc      |  4 ++--
 lang/c++/impl/ValidSchema.cc   |  2 +-
 lang/c++/test/DataFileTests.cc | 19 ++++++++++---------
 3 files changed, 13 insertions(+), 12 deletions(-)
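
Most of the diff below addresses a single class of compiler diagnostic:
signed/unsigned comparison warnings (GCC/Clang -Wsign-compare). In each of
those hunks a loop counter declared as `auto i = 0` or `int i` was being
compared against a size_t bound, and the fix is to declare the counter as
size_t. (The final hunk fixes a separate unused-variable warning, discussed
after it.) A minimal standalone sketch of the warning and the fix
(illustrative only, not Avro code):

    #include <cstddef>
    #include <vector>

    int main() {
        std::vector<int> items{1, 2, 3};

        // Before: `auto` deduces int from the literal 0, and comparing an
        // int against the size_t returned by size() draws -Wsign-compare.
        //   for (auto i = 0; i < items.size(); i++) { ... }

        // After: a size_t counter matches the container's size type, so
        // the comparison is unsigned-to-unsigned and warning-free.
        for (std::size_t i = 0; i < items.size(); i++) {
            (void)items[i]; // stand-in for real work on element i
        }
        return 0;
    }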

diff --git a/lang/c++/impl/NodeImpl.cc b/lang/c++/impl/NodeImpl.cc
index 3e32062..f38b492 100644
--- a/lang/c++/impl/NodeImpl.cc
+++ b/lang/c++/impl/NodeImpl.cc
@@ -374,7 +374,7 @@ void NodeRecord::printDefaultToJson(const GenericDatum &g, std::ostream &os,
         os << "{\n";
 
         // Serialize all fields of the record with recursive calls:
-        for (auto i = 0; i < g.value<GenericRecord>().fieldCount(); i++) {
+        for (size_t i = 0; i < g.value<GenericRecord>().fieldCount(); i++) {
             if (i == 0) {
                 ++depth;
             } else {  // i > 0
@@ -418,7 +418,7 @@ void NodeMap::printDefaultToJson(const GenericDatum &g, std::ostream &os,
     } else {
         os << "{\n";
 
-        for (auto i = 0; i < g.value<GenericMap>().value().size(); i++) {
+        for (size_t i = 0; i < g.value<GenericMap>().value().size(); i++) {
             if (i == 0) {
                 ++depth;
             } else {
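
Why the small change above matters: in C++, `auto i = 0` deduces int (from
the literal 0), while fieldCount() and value().size() return size_t. A short
sketch of the deduction (standalone, illustrative only):

    #include <cstddef>
    #include <type_traits>

    int main() {
        auto i = 0; // deduced as int, no matter what it is compared to later
        static_assert(std::is_same<decltype(i), int>::value,
                      "auto deduces int from the literal 0");

        std::size_t n = 10;
        // A condition like (i < n) would then mix a signed int with an
        // unsigned size_t, which is exactly what -Wsign-compare reports.
        (void)i;
        (void)n;
        return 0;
    }
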
diff --git a/lang/c++/impl/ValidSchema.cc b/lang/c++/impl/ValidSchema.cc
index d70f819..8cd4db4 100644
--- a/lang/c++/impl/ValidSchema.cc
+++ b/lang/c++/impl/ValidSchema.cc
@@ -71,7 +71,7 @@ namespace avro {
 
         node->lock();
         auto leaves = node->leaves();
-        for (auto i = 0; i < leaves; ++i) {
+        for (size_t i = 0; i < leaves; ++i) {
             const NodePtr &leaf(node->leafAt(i));
 
             if (!validate(leaf, symbolMap)) {
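
Here the bound comes from `auto leaves = node->leaves()`, which evidently
yields an unsigned count, so the counter is changed to match. An alternative
that avoids naming the type at all is to deduce the counter from the bound;
a sketch under that assumption (leafCount() below is a hypothetical stand-in
for node->leaves()):

    #include <cstddef>

    // Hypothetical stand-in for node->leaves().
    std::size_t leafCount() { return 3; }

    int main() {
        auto leaves = leafCount();
        // decltype(leaves) keeps the counter's type identical to the
        // bound's, even if leafCount() changes its return type later.
        for (decltype(leaves) i = 0; i < leaves; ++i) {
            // visit leaf i here
        }
        return 0;
    }
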
diff --git a/lang/c++/test/DataFileTests.cc b/lang/c++/test/DataFileTests.cc
index ca9cb9a..b473baa 100644
--- a/lang/c++/test/DataFileTests.cc
+++ b/lang/c++/test/DataFileTests.cc
@@ -896,25 +896,25 @@ void testReadRecordEfficientlyUsingLastSync(avro::Codec codec) {
 
     const char* filename = "test_readRecordUsingLastSync.df";
     
-    int numberOfRecords = 100;
-    int recordToRead = 37;  // pick specific record to read efficiently
-    int syncPointWithRecord = 0;
-    int finalSync = 0;
-    int recordsUptoLastSync = 0;
-    int firstSyncPoint = 0;
+    size_t numberOfRecords = 100;
+    size_t recordToRead = 37;  // pick specific record to read efficiently
+    size_t syncPointWithRecord = 0;
+    size_t finalSync = 0;
+    size_t recordsUptoLastSync = 0;
+    size_t firstSyncPoint = 0;
     {
         avro::DataFileWriter<TestRecord> df(filename,
             writerSchema, 1024, codec);
 
         firstSyncPoint = df.getCurrentBlockStart();
         syncPointWithRecord = firstSyncPoint;
-        for(int i = 0; i < numberOfRecords; i++)
+        for (size_t i = 0; i < numberOfRecords; i++)
         {
             df.write(TestRecord(largeString, (int64_t)i));
 
             // During the write, gather all the sync boundaries from the lastSync() API
-            int recordsWritten = i + 1;
-            if((recordsWritten <= recordToRead) && (df.getCurrentBlockStart() != syncPointWithRecord))
+            size_t recordsWritten = i + 1;
+            if ((recordsWritten <= recordToRead) && (df.getCurrentBlockStart() != syncPointWithRecord))
             {
                 recordsUptoLastSync = i;    // 1 less than total number of records written, since the sync block is sealed before a write
                 syncPointWithRecord = df.getCurrentBlockStart();
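
In the test above the record counters become size_t as well, which removes
the mixed comparison in `i < numberOfRecords`. The existing (int64_t)i cast
stays, so the unsigned-to-signed conversion happens at one explicit point.
A standalone sketch of that pattern (consume() is hypothetical):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for a sink that takes a signed value, like
    // the TestRecord constructor in the test above.
    void consume(int64_t value) { (void)value; }

    int main() {
        const std::size_t numberOfRecords = 100;
        for (std::size_t i = 0; i < numberOfRecords; i++) {
            // The explicit cast confines the unsigned-to-signed
            // conversion to one visible spot instead of letting it
            // happen implicitly.
            consume(static_cast<int64_t>(i));
        }
        return 0;
    }
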
@@ -935,6 +935,7 @@ void testReadRecordEfficientlyUsingLastSync(avro::Codec codec) {
         const uint8_t* pData = nullptr;
         size_t length = 0;
         bool hasRead = seekableInputStream->next(&pData, &length);
+        BOOST_CHECK(hasRead);
 
         // keep it simple, assume we've got in all data we want. We have a high buffersize to ensure this above.
         BOOST_CHECK_GE(length, firstSyncPoint);
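
The BOOST_CHECK(hasRead) added in the last hunk likely does double duty:
hasRead was previously assigned but never read, which compilers can flag as
an unused variable, and asserting it also turns an unstated assumption (the
stream really produced data) into a checked one. A sketch of the same idea
without Boost (nextChunk() is a hypothetical stand-in for the stream's
next() call):

    #include <cstdio>

    // Hypothetical stand-in for seekableInputStream->next(&pData, &length).
    bool nextChunk() { return true; }

    int main() {
        bool hasRead = nextChunk();
        // Consuming the flag avoids the unused-variable warning and, more
        // importantly, fails loudly if the stream returned no data.
        if (!hasRead) {
            std::fprintf(stderr, "expected data from the stream\n");
            return 1;
        }
        return 0;
    }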