Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/07/29 18:28:51 UTC
svn commit: r1152295 [10/10] - in /hadoop/common/trunk/hdfs: ./ bin/ ivy/
src/docs/src/documentation/content/xdocs/ src/java/
src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/server/common/ src/...
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml?rev=1152295&r1=1152294&r2=1152295&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml Fri Jul 29 16:28:45 2011
@@ -1,428 +1,498 @@
<?xml version="1.0"?>
<EDITS>
- <EDITS_VERSION>-24</EDITS_VERSION>
+ <EDITS_VERSION>-38</EDITS_VERSION>
+ <RECORD>
+ <OPCODE>24</OPCODE>
+ <DATA>
+ <TRANSACTION_ID>1</TRANSACTION_ID>
+ </DATA>
+ <CHECKSUM>1504643968</CHECKSUM>
+ </RECORD>
<RECORD>
<OPCODE>21</OPCODE>
<DATA>
+ <TRANSACTION_ID>2</TRANSACTION_ID>
<KEY_ID>1</KEY_ID>
- <KEY_EXPIRY_DATE>1287183164658</KEY_EXPIRY_DATE>
+ <KEY_EXPIRY_DATE>1304751257518</KEY_EXPIRY_DATE>
<KEY_LENGTH>3</KEY_LENGTH>
- <KEY_BLOB>drEs</KEY_BLOB>
+ <KEY_BLOB>2FhO</KEY_BLOB>
</DATA>
+ <CHECKSUM>-174778556</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>21</OPCODE>
<DATA>
+ <TRANSACTION_ID>3</TRANSACTION_ID>
<KEY_ID>2</KEY_ID>
- <KEY_EXPIRY_DATE>1287183164703</KEY_EXPIRY_DATE>
+ <KEY_EXPIRY_DATE>1304751257521</KEY_EXPIRY_DATE>
<KEY_LENGTH>3</KEY_LENGTH>
- <KEY_BLOB>1cGc</KEY_BLOB>
+ <KEY_BLOB>77-r</KEY_BLOB>
</DATA>
+ <CHECKSUM>1565957291</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>10</OPCODE>
<DATA>
+ <TRANSACTION_ID>4</TRANSACTION_ID>
<GENERATION_STAMP>1001</GENERATION_STAMP>
</DATA>
+ <CHECKSUM>1423210231</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>0</OPCODE>
<DATA>
+ <TRANSACTION_ID>5</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491964741</MTIME>
- <ATIME>1286491964741</ATIME>
+ <MTIME>1304060057562</MTIME>
+ <ATIME>1304060057562</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>0</NUMBLOCKS>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
- <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
</DATA>
+ <CHECKSUM>-1854451489</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>9</OPCODE>
<DATA>
+ <TRANSACTION_ID>6</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491964758</MTIME>
- <ATIME>1286491964741</ATIME>
+ <MTIME>1304060057572</MTIME>
+ <ATIME>1304060057562</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>0</NUMBLOCKS>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
</DATA>
+ <CHECKSUM>617592855</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>1</OPCODE>
<DATA>
+ <TRANSACTION_ID>7</TRANSACTION_ID>
<LENGTH>3</LENGTH>
<SOURCE>/file_create</SOURCE>
<DESTINATION>/file_moved</DESTINATION>
- <TIMESTAMP>1286491964766</TIMESTAMP>
+ <TIMESTAMP>1304060057575</TIMESTAMP>
</DATA>
+ <CHECKSUM>367100554</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>2</OPCODE>
<DATA>
+ <TRANSACTION_ID>8</TRANSACTION_ID>
<LENGTH>2</LENGTH>
<PATH>/file_moved</PATH>
- <TIMESTAMP>1286491964775</TIMESTAMP>
+ <TIMESTAMP>1304060057577</TIMESTAMP>
</DATA>
+ <CHECKSUM>1048346698</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>3</OPCODE>
<DATA>
+ <TRANSACTION_ID>9</TRANSACTION_ID>
<LENGTH>3</LENGTH>
<PATH>/directory_mkdir</PATH>
- <TIMESTAMP>1286491964783</TIMESTAMP>
+ <TIMESTAMP>1304060057581</TIMESTAMP>
<ATIME>0</ATIME>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>493</FS_PERMISSIONS>
</PERMISSION_STATUS>
</DATA>
+ <CHECKSUM>1207240248</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>10</OPCODE>
<DATA>
+ <TRANSACTION_ID>10</TRANSACTION_ID>
<GENERATION_STAMP>1002</GENERATION_STAMP>
</DATA>
+ <CHECKSUM>85982431</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>0</OPCODE>
<DATA>
+ <TRANSACTION_ID>11</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491964796</MTIME>
- <ATIME>1286491964796</ATIME>
+ <MTIME>1304060057584</MTIME>
+ <ATIME>1304060057584</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>0</NUMBLOCKS>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
- <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
</DATA>
+ <CHECKSUM>1796314473</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>9</OPCODE>
<DATA>
+ <TRANSACTION_ID>12</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491964814</MTIME>
- <ATIME>1286491964796</ATIME>
+ <MTIME>1304060057588</MTIME>
+ <ATIME>1304060057584</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>0</NUMBLOCKS>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
</DATA>
+ <CHECKSUM>1017626905</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>4</OPCODE>
<DATA>
+ <TRANSACTION_ID>13</TRANSACTION_ID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
</DATA>
+ <CHECKSUM>1842610087</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>7</OPCODE>
<DATA>
+ <TRANSACTION_ID>14</TRANSACTION_ID>
<PATH>/file_create</PATH>
<FS_PERMISSIONS>511</FS_PERMISSIONS>
</DATA>
+ <CHECKSUM>605568911</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>8</OPCODE>
<DATA>
+ <TRANSACTION_ID>15</TRANSACTION_ID>
<PATH>/file_create</PATH>
<USERNAME>newOwner</USERNAME>
<GROUPNAME/>
</DATA>
+ <CHECKSUM>-1411790340</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>13</OPCODE>
<DATA>
+ <TRANSACTION_ID>16</TRANSACTION_ID>
<LENGTH>3</LENGTH>
<PATH>/file_create</PATH>
<MTIME>1285195527000</MTIME>
<ATIME>1285195527000</ATIME>
</DATA>
+ <CHECKSUM>1428793678</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>14</OPCODE>
<DATA>
+ <TRANSACTION_ID>17</TRANSACTION_ID>
<PATH>/directory_mkdir</PATH>
<NS_QUOTA>1000</NS_QUOTA>
<DS_QUOTA>-1</DS_QUOTA>
</DATA>
+ <CHECKSUM>-1476130374</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>15</OPCODE>
<DATA>
+ <TRANSACTION_ID>18</TRANSACTION_ID>
<LENGTH>3</LENGTH>
<SOURCE>/file_create</SOURCE>
<DESTINATION>/file_moved</DESTINATION>
- <TIMESTAMP>1286491964858</TIMESTAMP>
+ <TIMESTAMP>1304060057605</TIMESTAMP>
<RENAME_OPTIONS>AA</RENAME_OPTIONS>
</DATA>
+ <CHECKSUM>-1155144192</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>10</OPCODE>
<DATA>
+ <TRANSACTION_ID>19</TRANSACTION_ID>
<GENERATION_STAMP>1003</GENERATION_STAMP>
</DATA>
+ <CHECKSUM>1920677987</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>0</OPCODE>
<DATA>
+ <TRANSACTION_ID>20</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491964873</MTIME>
- <ATIME>1286491964873</ATIME>
+ <MTIME>1304060057613</MTIME>
+ <ATIME>1304060057613</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>0</NUMBLOCKS>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
- <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
</DATA>
+ <CHECKSUM>-428545606</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>9</OPCODE>
<DATA>
+ <TRANSACTION_ID>21</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491965024</MTIME>
- <ATIME>1286491964873</ATIME>
+ <MTIME>1304060057694</MTIME>
+ <ATIME>1304060057613</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>3</NUMBLOCKS>
<BLOCK>
- <BLOCK_ID>1096087107607101866</BLOCK_ID>
+ <BLOCK_ID>3459038074990663911</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1003</BLOCK_GENERATION_STAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>8798023959648425597</BLOCK_ID>
+ <BLOCK_ID>-5555244278278879146</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1003</BLOCK_GENERATION_STAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>4060815343079109399</BLOCK_ID>
+ <BLOCK_ID>-6344128791846831740</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1003</BLOCK_GENERATION_STAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
</DATA>
+ <CHECKSUM>707995174</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>10</OPCODE>
<DATA>
+ <TRANSACTION_ID>22</TRANSACTION_ID>
<GENERATION_STAMP>1004</GENERATION_STAMP>
</DATA>
+ <CHECKSUM>-1500977009</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>0</OPCODE>
<DATA>
+ <TRANSACTION_ID>23</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491965035</MTIME>
- <ATIME>1286491965035</ATIME>
+ <MTIME>1304060057701</MTIME>
+ <ATIME>1304060057701</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>0</NUMBLOCKS>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
- <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
</DATA>
+ <CHECKSUM>-119850856</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>9</OPCODE>
<DATA>
+ <TRANSACTION_ID>24</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491965093</MTIME>
- <ATIME>1286491965035</ATIME>
+ <MTIME>1304060057737</MTIME>
+ <ATIME>1304060057701</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>3</NUMBLOCKS>
<BLOCK>
- <BLOCK_ID>85340326229460895</BLOCK_ID>
+ <BLOCK_ID>4671949296381030428</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1004</BLOCK_GENERATION_STAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>4456960998526419279</BLOCK_ID>
+ <BLOCK_ID>-844362243522407159</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1004</BLOCK_GENERATION_STAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-6161739531018161735</BLOCK_ID>
+ <BLOCK_ID>3476886462779656950</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1004</BLOCK_GENERATION_STAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
</DATA>
+ <CHECKSUM>-766805874</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>10</OPCODE>
<DATA>
+ <TRANSACTION_ID>25</TRANSACTION_ID>
<GENERATION_STAMP>1005</GENERATION_STAMP>
</DATA>
+ <CHECKSUM>238426056</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>0</OPCODE>
<DATA>
+ <TRANSACTION_ID>26</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491965105</MTIME>
- <ATIME>1286491965105</ATIME>
+ <MTIME>1304060057742</MTIME>
+ <ATIME>1304060057742</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>0</NUMBLOCKS>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
- <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
</DATA>
+ <CHECKSUM>1156254705</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>9</OPCODE>
<DATA>
+ <TRANSACTION_ID>27</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1286491965148</MTIME>
- <ATIME>1286491965105</ATIME>
+ <MTIME>1304060057764</MTIME>
+ <ATIME>1304060057742</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<NUMBLOCKS>3</NUMBLOCKS>
<BLOCK>
- <BLOCK_ID>-3894328423940677915</BLOCK_ID>
+ <BLOCK_ID>-754893470864399741</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1005</BLOCK_GENERATION_STAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-2833847567910728858</BLOCK_ID>
+ <BLOCK_ID>1820875380010181049</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1005</BLOCK_GENERATION_STAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-3654781106237722465</BLOCK_ID>
+ <BLOCK_ID>8266387560744259971</BLOCK_ID>
<BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
<BLOCK_GENERATION_STAMP>1005</BLOCK_GENERATION_STAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>420</FS_PERMISSIONS>
</PERMISSION_STATUS>
</DATA>
+ <CHECKSUM>-654780301</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>16</OPCODE>
<DATA>
+ <TRANSACTION_ID>28</TRANSACTION_ID>
<LENGTH>4</LENGTH>
<CONCAT_TARGET>/file_concat_target</CONCAT_TARGET>
<CONCAT_SOURCE>/file_concat_0</CONCAT_SOURCE>
<CONCAT_SOURCE>/file_concat_1</CONCAT_SOURCE>
- <TIMESTAMP>1286491965157</TIMESTAMP>
+ <TIMESTAMP>1304060057767</TIMESTAMP>
</DATA>
+ <CHECKSUM>1273279541</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>17</OPCODE>
<DATA>
+ <TRANSACTION_ID>29</TRANSACTION_ID>
<LENGTH>4</LENGTH>
<SOURCE>/file_symlink</SOURCE>
<DESTINATION>/file_concat_target</DESTINATION>
- <MTIME>1286491965168</MTIME>
- <ATIME>1286491965168</ATIME>
+ <MTIME>1304060057770</MTIME>
+ <ATIME>1304060057770</ATIME>
<PERMISSION_STATUS>
- <USERNAME>steffl</USERNAME>
+ <USERNAME>todd</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<FS_PERMISSIONS>511</FS_PERMISSIONS>
</PERMISSION_STATUS>
</DATA>
+ <CHECKSUM>1385678569</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>18</OPCODE>
<DATA>
+ <TRANSACTION_ID>30</TRANSACTION_ID>
<T_VERSION>0</T_VERSION>
- <T_OWNER>steffl</T_OWNER>
+ <T_OWNER>todd</T_OWNER>
<T_RENEWER>JobTracker</T_RENEWER>
<T_REAL_USER/>
- <T_ISSUE_DATE>1286491965176</T_ISSUE_DATE>
- <T_MAX_DATE>1287096765176</T_MAX_DATE>
+ <T_ISSUE_DATE>1304060057773</T_ISSUE_DATE>
+ <T_MAX_DATE>1304664857773</T_MAX_DATE>
<T_SEQUENCE_NUMBER>1</T_SEQUENCE_NUMBER>
<T_MASTER_KEY_ID>2</T_MASTER_KEY_ID>
- <T_EXPIRY_TIME>1286578365176</T_EXPIRY_TIME>
+ <T_EXPIRY_TIME>1304146457773</T_EXPIRY_TIME>
</DATA>
+ <CHECKSUM>913145699</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>19</OPCODE>
<DATA>
+ <TRANSACTION_ID>31</TRANSACTION_ID>
<T_VERSION>0</T_VERSION>
- <T_OWNER>steffl</T_OWNER>
+ <T_OWNER>todd</T_OWNER>
<T_RENEWER>JobTracker</T_RENEWER>
<T_REAL_USER/>
- <T_ISSUE_DATE>1286491965176</T_ISSUE_DATE>
- <T_MAX_DATE>1287096765176</T_MAX_DATE>
+ <T_ISSUE_DATE>1304060057773</T_ISSUE_DATE>
+ <T_MAX_DATE>1304664857773</T_MAX_DATE>
<T_SEQUENCE_NUMBER>1</T_SEQUENCE_NUMBER>
<T_MASTER_KEY_ID>2</T_MASTER_KEY_ID>
- <T_EXPIRY_TIME>1286578365198</T_EXPIRY_TIME>
+ <T_EXPIRY_TIME>1304146457785</T_EXPIRY_TIME>
</DATA>
+ <CHECKSUM>-1772039941</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>20</OPCODE>
<DATA>
+ <TRANSACTION_ID>32</TRANSACTION_ID>
<T_VERSION>0</T_VERSION>
- <T_OWNER>steffl</T_OWNER>
+ <T_OWNER>todd</T_OWNER>
<T_RENEWER>JobTracker</T_RENEWER>
<T_REAL_USER/>
- <T_ISSUE_DATE>1286491965176</T_ISSUE_DATE>
- <T_MAX_DATE>1287096765176</T_MAX_DATE>
+ <T_ISSUE_DATE>1304060057773</T_ISSUE_DATE>
+ <T_MAX_DATE>1304664857773</T_MAX_DATE>
<T_SEQUENCE_NUMBER>1</T_SEQUENCE_NUMBER>
<T_MASTER_KEY_ID>2</T_MASTER_KEY_ID>
</DATA>
+ <CHECKSUM>1382094146</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>0</OPCODE>
<DATA>
+ <TRANSACTION_ID>33</TRANSACTION_ID>
<LENGTH>5</LENGTH>
<PATH>/reassign-lease-test</PATH>
<REPLICATION>1</REPLICATION>
@@ -438,14 +508,24 @@
<CLIENT_NAME>DFSClient_871171074</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
</DATA>
+ <CHECKSUM>1975140107</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>22</OPCODE>
<DATA>
+ <TRANSACTION_ID>34</TRANSACTION_ID>
<CLIENT_NAME>DFSClient_871171074</CLIENT_NAME>
<PATH>/reassign-lease-test</PATH>
<CLIENT_NAME>HDFS_NameNode</CLIENT_NAME>
</DATA>
+ <CHECKSUM>1975140107</CHECKSUM>
+ </RECORD>
+ <RECORD>
+ <OPCODE>23</OPCODE>
+ <DATA>
+ <TRANSACTION_ID>35</TRANSACTION_ID>
+ </DATA>
+ <CHECKSUM>1975140107</CHECKSUM>
</RECORD>
<RECORD>
<OPCODE>-1</OPCODE>
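
The regenerated editsStored.xml above reflects the move from edit log format -24 to -38: every RECORD now carries a TRANSACTION_ID and a trailing per-record CHECKSUM, and the log is bracketed by begin/end segment records (the new opcodes 24 and 23). Below is a minimal sketch of how a reader could validate such per-record checksums, assuming a CRC32 over the serialized record bytes truncated to a signed 32-bit value like the CHECKSUM fields above; the exact algorithm and framing are assumptions, not something this diff confirms.

    import java.io.IOException;
    import java.util.zip.CRC32;

    public class EditsRecordChecksum {
      /** Computes a signed 32-bit checksum over raw record bytes (assumed CRC32). */
      static int checksumOf(byte[] recordBytes) {
        CRC32 crc = new CRC32();
        crc.update(recordBytes, 0, recordBytes.length);
        return (int) crc.getValue();  // truncated to int, matching values like -174778556
      }

      /** Fails the way an edits reader would on a corrupt record. */
      static void verify(byte[] recordBytes, int expected) throws IOException {
        int actual = checksumOf(recordBytes);
        if (actual != expected) {
          throw new IOException("Edits record checksum mismatch: expected "
              + expected + " but got " + actual);
        }
      }
    }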
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1152295&r1=1152294&r2=1152295&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Fri Jul 29 16:28:45 2011
@@ -28,7 +28,6 @@ import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.net.URI;
import java.util.HashMap;
import java.util.Set;
@@ -41,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
/**
@@ -128,11 +128,10 @@ public class TestOfflineImageViewer exte
cluster.getNameNode().saveNamespace();
// Determine location of fsimage file
- URI [] files = cluster.getNameDirs(0).toArray(new URI[0]);
- orig = new File(files[0].getPath(), "current/fsimage");
-
- if (!orig.exists()) {
- fail("Didn't generate or can't find fsimage.");
+ orig = FSImageTestUtil.findLatestImageFile(
+ cluster.getNameNode().getFSImage().getStorage().getStorageDir(0));
+ if (orig == null) {
+ fail("Didn't generate or can't find fsimage");
}
} finally {
if(cluster != null)
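
The TestOfflineImageViewer change above drops the hardcoded current/fsimage path in favor of FSImageTestUtil.findLatestImageFile, since a storage directory can now hold several transaction-stamped checkpoint images. A rough sketch of what such a lookup can look like, assuming image files are named fsimage_<txid>; the naming scheme is an assumption here rather than something this hunk spells out.

    import java.io.File;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class LatestImageFinder {
      private static final Pattern IMAGE_NAME = Pattern.compile("fsimage_(\\d+)");

      /** Returns the image file with the highest txid in 'currentDir', or null. */
      static File findLatestImage(File currentDir) {
        File latest = null;
        long bestTxId = -1;
        File[] files = currentDir.listFiles();
        if (files == null) {
          return null;  // directory missing or unreadable
        }
        for (File f : files) {
          Matcher m = IMAGE_NAME.matcher(f.getName());
          if (m.matches()) {
            long txid = Long.parseLong(m.group(1));
            if (txid > bestTxId) {
              bestTxId = txid;
              latest = f;
            }
          }
        }
        return latest;
      }
    }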
Added: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java?rev=1152295&view=auto
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java (added)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java Fri Jul 29 16:28:45 2011
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.aspectj.util.FileUtil;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.base.Joiner;
+
+public class TestAtomicFileOutputStream {
+
+ private static final String TEST_STRING = "hello world";
+ private static final String TEST_STRING_2 = "goodbye world";
+
+ private static File BASE_DIR = new File(
+ System.getProperty("test.build.data", "build/test/data"));
+ private static File TEST_DIR = new File(BASE_DIR,
+ TestAtomicFileOutputStream.class.getName());
+
+ private static File DST_FILE = new File(TEST_DIR, "test.txt");
+
+ @Before
+ public void cleanupTestDir() throws IOException {
+ assertTrue(TEST_DIR.exists() || TEST_DIR.mkdirs());
+ FileUtil.deleteContents(TEST_DIR);
+ }
+
+ /**
+ * Test case where there is no existing file
+ */
+ @Test
+ public void testWriteNewFile() throws IOException {
+ OutputStream fos = new AtomicFileOutputStream(DST_FILE);
+ assertFalse(DST_FILE.exists());
+ fos.write(TEST_STRING.getBytes());
+ fos.flush();
+ assertFalse(DST_FILE.exists());
+ fos.close();
+ assertTrue(DST_FILE.exists());
+
+ String readBackData = DFSTestUtil.readFile(DST_FILE);
+ assertEquals(TEST_STRING, readBackData);
+ }
+
+ /**
+ * Test case where the destination file already exists
+ */
+ @Test
+ public void testOverwriteFile() throws IOException {
+ assertTrue("Creating empty dst file", DST_FILE.createNewFile());
+
+ OutputStream fos = new AtomicFileOutputStream(DST_FILE);
+
+ assertTrue("Empty file still exists", DST_FILE.exists());
+ fos.write(TEST_STRING.getBytes());
+ fos.flush();
+
+ // Original contents still in place
+ assertEquals("", DFSTestUtil.readFile(DST_FILE));
+
+ fos.close();
+
+ // New contents replace original file
+ String readBackData = DFSTestUtil.readFile(DST_FILE);
+ assertEquals(TEST_STRING, readBackData);
+ }
+
+ /**
+ * Test case where the flush() fails at close time - make sure
+ * that we clean up after ourselves and don't touch any
+ * existing file at the destination
+ */
+ @Test
+ public void testFailToFlush() throws IOException {
+ // Create a file at destination
+ FileOutputStream fos = new FileOutputStream(DST_FILE);
+ fos.write(TEST_STRING_2.getBytes());
+ fos.close();
+
+ OutputStream failingStream = createFailingStream();
+ failingStream.write(TEST_STRING.getBytes());
+ try {
+ failingStream.close();
+ fail("Close didn't throw exception");
+ } catch (IOException ioe) {
+ // expected
+ }
+
+ // Should not have touched original file
+ assertEquals(TEST_STRING_2, DFSTestUtil.readFile(DST_FILE));
+
+ assertEquals("Temporary file should have been cleaned up",
+ DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()));
+ }
+
+ /**
+ * Create a stream that fails to flush at close time
+ */
+ private OutputStream createFailingStream() throws FileNotFoundException {
+ return new AtomicFileOutputStream(DST_FILE) {
+ @Override
+ public void flush() throws IOException {
+ throw new IOException("injected failure");
+ }
+ };
+ }
+}
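
The tests above pin down the write-to-temp-then-rename contract of AtomicFileOutputStream: nothing is visible at the destination until close() succeeds, a successful close() replaces any existing file in one step, and a flush failure at close time must delete the temporary file and leave the destination untouched. A minimal sketch of a stream honoring that contract follows; it mirrors the observable behavior only and is not the actual implementation.

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.FilterOutputStream;
    import java.io.IOException;

    class AtomicWriteSketch extends FilterOutputStream {
      private final File dst;
      private final File tmp;

      AtomicWriteSketch(File dst) throws IOException {
        super(new FileOutputStream(
            new File(dst.getParentFile(), dst.getName() + ".tmp")));
        this.dst = dst;
        this.tmp = new File(dst.getParentFile(), dst.getName() + ".tmp");
      }

      @Override
      public void close() throws IOException {
        boolean success = false;
        try {
          flush();          // an injected failure, as in testFailToFlush, fires here
          out.close();
          success = true;
        } finally {
          if (!success) {
            try { out.close(); } catch (IOException ignored) { }
            tmp.delete();   // never leave the temp file behind on failure
          }
        }
        if (!tmp.renameTo(dst)) {
          dst.delete();     // some platforms refuse to rename over an existing file
          if (!tmp.renameTo(dst)) {
            throw new IOException("Could not rename " + tmp + " to " + dst);
          }
        }
      }
    }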
Added: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java?rev=1152295&view=auto
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java (added)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java Fri Jul 29 16:28:45 2011
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.util.MD5FileUtils;
+import org.apache.hadoop.io.MD5Hash;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestMD5FileUtils {
+ private static final File TEST_DIR_ROOT = new File(
+ System.getProperty("test.build.data","build/test/data"));
+ private static final File TEST_DIR = new File(TEST_DIR_ROOT,
+ "TestMD5FileUtils");
+ private static final File TEST_FILE = new File(TEST_DIR,
+ "testMd5File.dat");
+
+ private static final int TEST_DATA_LEN = 128 * 1024; // 128KB test data
+ private static final byte[] TEST_DATA =
+ DFSTestUtil.generateSequentialBytes(0, TEST_DATA_LEN);
+ private static final MD5Hash TEST_MD5 = MD5Hash.digest(TEST_DATA);
+
+ @Before
+ public void setup() throws IOException {
+ FileUtil.fullyDelete(TEST_DIR);
+ assertTrue(TEST_DIR.mkdirs());
+
+ // Write a file out
+ FileOutputStream fos = new FileOutputStream(TEST_FILE);
+ fos.write(TEST_DATA);
+ fos.close();
+ }
+
+ @Test
+ public void testComputeMd5ForFile() throws Exception {
+ MD5Hash computedDigest = MD5FileUtils.computeMd5ForFile(TEST_FILE);
+ assertEquals(TEST_MD5, computedDigest);
+ }
+
+ @Test
+ public void testVerifyMD5FileGood() throws Exception {
+ MD5FileUtils.saveMD5File(TEST_FILE, TEST_MD5);
+ MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+ }
+
+ /**
+ * Test when .md5 file does not exist at all
+ */
+ @Test(expected=IOException.class)
+ public void testVerifyMD5FileMissing() throws Exception {
+ MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+ }
+
+ /**
+ * Test when the .md5 file exists but holds an incorrect checksum
+ */
+ @Test
+ public void testVerifyMD5FileBadDigest() throws Exception {
+ MD5FileUtils.saveMD5File(TEST_FILE, MD5Hash.digest(new byte[0]));
+ try {
+ MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+ fail("Did not throw");
+ } catch (IOException ioe) {
+ // Expected
+ }
+ }
+
+ /**
+ * Test when .md5 file exists but has a bad format
+ */
+ @Test
+ public void testVerifyMD5FileBadFormat() throws Exception {
+ FileWriter writer = new FileWriter(MD5FileUtils.getDigestFileForFile(TEST_FILE));
+ try {
+ writer.write("this is not an md5 file");
+ } finally {
+ writer.close();
+ }
+
+ try {
+ MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+ fail("Did not throw");
+ } catch (IOException ioe) {
+ // expected
+ }
+ }
+}
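
TestMD5FileUtils above exercises a sidecar-digest scheme: saveMD5File writes a companion .md5 file next to the data file, and verifySavedMD5 recomputes the digest and throws IOException when the sidecar is missing, malformed, or disagrees. A hedged sketch of that contract, assuming an md5sum-style "<hex digest> *<filename>" line; the exact on-disk format is an assumption.

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileReader;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    class Md5SidecarSketch {
      static File digestFileFor(File f) {
        return new File(f.getParentFile(), f.getName() + ".md5");
      }

      static String computeMd5Hex(File f) throws IOException {
        MessageDigest md5;
        try {
          md5 = MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
          throw new IOException("MD5 unavailable", e);
        }
        InputStream in = new FileInputStream(f);
        try {
          byte[] buf = new byte[8192];
          int n;
          while ((n = in.read(buf)) > 0) {
            md5.update(buf, 0, n);
          }
        } finally {
          in.close();
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md5.digest()) {
          hex.append(String.format("%02x", b));
        }
        return hex.toString();
      }

      /** Throws IOException if the sidecar is missing, malformed, or mismatched. */
      static void verifySavedMd5(File f) throws IOException {
        BufferedReader r = new BufferedReader(new FileReader(digestFileFor(f)));
        String line;
        try {
          line = r.readLine();  // FileNotFoundException covers the missing case
        } finally {
          r.close();
        }
        if (line == null || !line.matches("[0-9a-f]{32} \\*.*")) {
          throw new IOException("Invalid md5 file format for " + f);
        }
        String expected = line.substring(0, 32);
        String actual = computeMd5Hex(f);
        if (!expected.equals(actual)) {
          throw new IOException("MD5 mismatch for " + f + ": stored " + expected
              + " but computed " + actual);
        }
      }
    }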
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java?rev=1152295&r1=1152294&r2=1152295&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java Fri Jul 29 16:28:45 2011
@@ -17,16 +17,29 @@
*/
package org.apache.hadoop.test;
+import java.io.File;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Supplier;
+import com.google.common.collect.Sets;
/**
* Test provides some very generic helpers which might be used across the tests
*/
public abstract class GenericTestUtils {
+
/**
* Extracts the name of the method where the invocation has happened
* @return String name of the invoking method
@@ -34,4 +47,133 @@ public abstract class GenericTestUtils {
public static String getMethodName() {
return Thread.currentThread().getStackTrace()[2].getMethodName();
}
+
+ /**
+ * Assert that a given file exists.
+ */
+ public static void assertExists(File f) {
+ Assert.assertTrue("File " + f + " should exist", f.exists());
+ }
+
+ /**
+ * List all of the files in 'dir' that match the regex 'pattern'.
+ * Then check that this list is identical to 'expectedMatches'.
+ * @throws IOException if the dir is inaccessible
+ */
+ public static void assertGlobEquals(File dir, String pattern,
+ String ... expectedMatches) throws IOException {
+
+ Set<String> found = Sets.newTreeSet();
+ for (File f : FileUtil.listFiles(dir)) {
+ if (f.getName().matches(pattern)) {
+ found.add(f.getName());
+ }
+ }
+ Set<String> expectedSet = Sets.newTreeSet(
+ Arrays.asList(expectedMatches));
+ Assert.assertEquals("Bad files matching " + pattern + " in " + dir,
+ Joiner.on(",").join(found),
+ Joiner.on(",").join(expectedSet));
+ }
+
+ public static void assertExceptionContains(String string, Throwable t) {
+ String msg = t.getMessage();
+ Assert.assertTrue(
+ "Unexpected exception:" + StringUtils.stringifyException(t),
+ msg.contains(string));
+ }
+
+ public static void waitFor(Supplier<Boolean> check,
+ int checkEveryMillis, int waitForMillis)
+ throws TimeoutException, InterruptedException
+ {
+ long st = System.currentTimeMillis();
+ do {
+ boolean result = check.get();
+ if (result) {
+ return;
+ }
+
+ Thread.sleep(checkEveryMillis);
+ } while (System.currentTimeMillis() - st < waitForMillis);
+ throw new TimeoutException("Timed out waiting for condition");
+ }
+
+
+ /**
+ * Mockito answer helper that triggers one latch as soon as the
+ * method is called, then waits on another before continuing.
+ */
+ public static class DelayAnswer implements Answer<Object> {
+ private final Log LOG;
+
+ private final CountDownLatch fireLatch = new CountDownLatch(1);
+ private final CountDownLatch waitLatch = new CountDownLatch(1);
+
+
+ public DelayAnswer(Log log) {
+ this.LOG = log;
+ }
+
+ /**
+ * Wait until the method is called.
+ */
+ public void waitForCall() throws InterruptedException {
+ fireLatch.await();
+ }
+
+ /**
+ * Tell the method to proceed.
+ * This should only be called after waitForCall()
+ */
+ public void proceed() {
+ waitLatch.countDown();
+ }
+
+ public Object answer(InvocationOnMock invocation) throws Throwable {
+ LOG.info("DelayAnswer firing fireLatch");
+ fireLatch.countDown();
+ try {
+ LOG.info("DelayAnswer waiting on waitLatch");
+ waitLatch.await();
+ LOG.info("DelayAnswer delay complete");
+ } catch (InterruptedException ie) {
+ throw new IOException("Interrupted waiting on latch", ie);
+ }
+ return passThrough(invocation);
+ }
+
+ protected Object passThrough(InvocationOnMock invocation) throws Throwable {
+ return invocation.callRealMethod();
+ }
+ }
+
+ /**
+ * An Answer implementation that simply forwards all calls through
+ * to a delegate.
+ *
+ * This is useful as the default Answer for a mock object, to create
+ * something like a spy on an RPC proxy. For example:
+ * <code>
+ * NamenodeProtocol origNNProxy = secondary.getNameNode();
+ * NamenodeProtocol spyNNProxy = Mockito.mock(NamenodeProtocol.class,
+ *     new DelegateAnswer(origNNProxy));
+ * doThrow(...).when(spyNNProxy).getBlockLocations(...);
+ * ...
+ * </code>
+ */
+ public static class DelegateAnswer implements Answer<Object> {
+ private final Object delegate;
+
+ public DelegateAnswer(Object delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public Object answer(InvocationOnMock invocation) throws Throwable {
+ return invocation.getMethod().invoke(
+ delegate, invocation.getArguments());
+ }
+ }
+
}
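
Finally, a sketch of how the new DelayAnswer helper is meant to be driven from a test: stub a spied object with the answer, call it from another thread, and use waitForCall()/proceed() to assert on intermediate state. SlowService and doWork are hypothetical placeholders, not names from this patch.

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
    import org.mockito.Mockito;

    public class DelayAnswerExample {
      /** Hypothetical class standing in for whatever call the test wants to pause. */
      public static class SlowService {
        public void doWork() { /* real work happens here */ }
      }

      public static void main(String[] args) throws Exception {
        Log log = LogFactory.getLog(DelayAnswerExample.class);
        final DelayAnswer delay = new DelayAnswer(log);

        // Spy so DelayAnswer.passThrough() can fall through to the real method.
        final SlowService svc = Mockito.spy(new SlowService());
        Mockito.doAnswer(delay).when(svc).doWork();

        Thread caller = new Thread() {
          @Override
          public void run() {
            svc.doWork();      // parks inside DelayAnswer until proceed()
          }
        };
        caller.start();

        delay.waitForCall();   // returns once doWork() has been entered
        // ... a real test would assert on intermediate state here ...
        delay.proceed();       // unblock the call
        caller.join();
      }
    }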