You are viewing a plain text version of this content. The canonical link to the original is not preserved in this plain text rendering.
Posted to commits@accumulo.apache.org by el...@apache.org on 2014/10/11 00:41:54 UTC
[12/21] git commit: ACCUMULO-3227 A bunch of string encoding issues.
ACCUMULO-3227 A bunch of string encoding issues.
Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/4acf7abd
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/4acf7abd
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/4acf7abd
Branch: refs/heads/master
Commit: 4acf7abd55da9b34007a3a6a393e1b6566caf1a4
Parents: dac8a89
Author: Josh Elser <el...@apache.org>
Authored: Fri Oct 10 17:43:50 2014 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Fri Oct 10 18:40:10 2014 -0400
----------------------------------------------------------------------
.../accumulo/core/replication/ReplicationSchema.java | 3 ++-
.../accumulo/core/replication/ReplicationTarget.java | 3 ++-
.../accumulo/server/replication/ReplicationUtil.java | 3 ++-
.../accumulo/server/util/MasterMetadataUtil.java | 15 ++++++++-------
.../tserver/replication/ReplicationProcessor.java | 3 ++-
5 files changed, 16 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/4acf7abd/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
index 3804566..6330dca 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
@@ -17,6 +17,7 @@
package org.apache.accumulo.core.replication;
import java.nio.charset.CharacterCodingException;
+import java.nio.charset.StandardCharsets;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.client.lexicoder.ULongLexicoder;
@@ -215,7 +216,7 @@ public class ReplicationSchema {
log.trace("Normalized {} into {}", file, pathString);
// Append the file as a suffix to the row
- row.append((ROW_SEPARATOR + pathString).getBytes(), 0, pathString.length() + ROW_SEPARATOR.getLength());
+ row.append((ROW_SEPARATOR + pathString).getBytes(StandardCharsets.UTF_8), 0, pathString.length() + ROW_SEPARATOR.getLength());
// Make the mutation and add the column update
return new Mutation(row);
http://git-wip-us.apache.org/repos/asf/accumulo/blob/4acf7abd/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
index d71d2bf..f882a8d 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.core.replication;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
@@ -154,7 +155,7 @@ public class ReplicationTarget implements Writable {
public static ReplicationTarget from(String s) {
ReplicationTarget target = new ReplicationTarget();
DataInputBuffer buffer = new DataInputBuffer();
- buffer.reset(s.getBytes(), s.length());
+ buffer.reset(s.getBytes(StandardCharsets.UTF_8), s.length());
try {
target.readFields(buffer);
http://git-wip-us.apache.org/repos/asf/accumulo/blob/4acf7abd/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
index 9f59b23..590cbe4 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
@@ -16,6 +16,7 @@
*/
package org.apache.accumulo.server.replication;
+import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@@ -200,7 +201,7 @@ public class ReplicationUtil {
public String getAbsolutePath(Connector conn, String workQueuePath, String queueKey) {
byte[] data = zooCache.get(workQueuePath + "/" + queueKey);
if (null != data) {
- return new String(data);
+ return new String(data, StandardCharsets.UTF_8);
}
return null;
http://git-wip-us.apache.org/repos/asf/accumulo/blob/4acf7abd/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index 2f9e397..05f28d7 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -17,6 +17,7 @@
package org.apache.accumulo.server.util;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -69,8 +70,8 @@ public class MasterMetadataUtil {
Map<FileRef,Long> bulkLoadedFiles, Credentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
Mutation m = extent.getPrevRowUpdateMutation();
- TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
- TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(StandardCharsets.UTF_8)));
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(StandardCharsets.UTF_8)));
if (lastFlushID > 0)
TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
if (lastCompactID > 0)
@@ -103,7 +104,7 @@ public class MasterMetadataUtil {
throw new IllegalArgumentException("Metadata entry does not have split ratio (" + metadataEntry + ")");
}
- double splitRatio = Double.parseDouble(new String(columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN).get()));
+ double splitRatio = Double.parseDouble(new String(columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN).get(), StandardCharsets.UTF_8));
Value prevEndRowIBW = columns.get(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
@@ -215,7 +216,7 @@ public class MasterMetadataUtil {
m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
for (FileRef scanFile : scanFiles)
- m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value("".getBytes()));
+ m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
if (size.getNumEntries() > 0)
m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode()));
@@ -299,7 +300,7 @@ public class MasterMetadataUtil {
if (dfv.getNumEntries() > 0) {
m.put(DataFileColumnFamily.NAME, path.meta(), new Value(dfv.encode()));
- TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(StandardCharsets.UTF_8)));
// stuff in this location
TServerInstance self = getTServerInstance(address, zooLock);
self.putLastLocation(m);
@@ -314,12 +315,12 @@ public class MasterMetadataUtil {
}
for (FileRef scanFile : filesInUseByScans)
- m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value("".getBytes()));
+ m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
if (mergeFile != null)
m.putDelete(DataFileColumnFamily.NAME, mergeFile.meta());
- TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
+ TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(Long.toString(flushId).getBytes(StandardCharsets.UTF_8)));
return m;
}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/4acf7abd/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
index 6f4d987..fbae6f2 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
@@ -17,6 +17,7 @@
package org.apache.accumulo.tserver.replication;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.NoSuchElementException;
@@ -77,7 +78,7 @@ public class ReplicationProcessor implements Processor {
@Override
public void process(String workID, byte[] data) {
ReplicationTarget target = DistributedWorkQueueWorkAssignerHelper.fromQueueKey(workID).getValue();
- String file = new String(data);
+ String file = new String(data, StandardCharsets.UTF_8);
log.debug("Received replication work for {} to {}", file, target);