Posted to common-commits@hadoop.apache.org by to...@apache.org on 2007/04/17 21:58:34 UTC
svn commit: r529744 - in
/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs: DFSClient.java
DataNode.java FSDirectory.java FSNamesystem.java JspHelper.java
NamenodeFsck.java SecondaryNameNode.java
Author: tomwhite
Date: Tue Apr 17 12:58:32 2007
New Revision: 529744
URL: http://svn.apache.org/viewvc?view=rev&rev=529744
Log:
HADOOP-1190. Fix unchecked warnings in dfs package.
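
The hunks below follow one common pattern for removing unchecked warnings under Java 5 generics: raw collection declarations gain type parameters, explicit casts on element access and on toArray disappear, and iterator loops become enhanced for loops. A minimal before/after sketch of that pattern (class and field names here are illustrative, not taken from the patch):

import java.io.OutputStream;
import java.util.TreeMap;

class PendingCreatesSketch {                       // illustrative name only
  // Before (raw type): every get() needed a cast and put() drew an unchecked warning:
  //   private TreeMap pendingCreates = new TreeMap();
  //   OutputStream out = (OutputStream) pendingCreates.get(src);

  // After (parameterized): the cast and the warning both disappear.
  private TreeMap<String, OutputStream> pendingCreates =
      new TreeMap<String, OutputStream>();

  OutputStream lookup(String src) {
    return pendingCreates.get(src);                // typed access, no cast
  }
}
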
Modified:
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Tue Apr 17 12:58:32 2007
@@ -59,7 +59,8 @@
* A map from name -> DFSOutputStream of files that are currently being
* written by this client.
*/
- private TreeMap pendingCreates = new TreeMap();
+ private TreeMap<String, OutputStream> pendingCreates =
+ new TreeMap<String, OutputStream>();
/**
* A class to track the list of DFS clients, so that they can be closed
@@ -67,16 +68,14 @@
* @author Owen O'Malley
*/
private static class ClientFinalizer extends Thread {
- private List clients = new ArrayList();
+ private List<DFSClient> clients = new ArrayList<DFSClient>();
public synchronized void addClient(DFSClient client) {
clients.add(client);
}
public synchronized void run() {
- Iterator itr = clients.iterator();
- while (itr.hasNext()) {
- DFSClient client = (DFSClient) itr.next();
+ for (DFSClient client : clients) {
if (client.running) {
try {
client.close();
@@ -529,13 +528,13 @@
Block oldBlocks[] = this.blocks;
LocatedBlock results[] = namenode.open(src);
- Vector blockV = new Vector();
- Vector nodeV = new Vector();
+ Vector<Block> blockV = new Vector<Block>();
+ Vector<DatanodeInfo[]> nodeV = new Vector<DatanodeInfo[]>();
for (int i = 0; i < results.length; i++) {
blockV.add(results[i].getBlock());
nodeV.add(results[i].getLocations());
}
- Block newBlocks[] = (Block[]) blockV.toArray(new Block[blockV.size()]);
+ Block[] newBlocks = blockV.toArray(new Block[blockV.size()]);
if (oldBlocks != null) {
for (int i = 0; i < oldBlocks.length; i++) {
@@ -548,7 +547,7 @@
}
}
this.blocks = newBlocks;
- this.nodes = (DatanodeInfo[][]) nodeV.toArray(new DatanodeInfo[nodeV.size()][]);
+ this.nodes = nodeV.toArray(new DatanodeInfo[nodeV.size()][]);
this.currentNode = null;
}
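
The ClientFinalizer change above is typical of the loop rewrites in this commit: once the field is declared as List<DFSClient>, the raw Iterator and the (DFSClient) cast can give way to an enhanced for loop. A standalone sketch of the same idiom (Runnable stands in for the real element type):

import java.util.ArrayList;
import java.util.List;

class FinalizerSketch {                            // illustrative, not DFSClient code
  private List<Runnable> clients = new ArrayList<Runnable>();

  void runAll() {
    // The enhanced for loop lets the compiler supply the iterator and the
    // element type, so no explicit cast (and no unchecked warning) is needed.
    for (Runnable client : clients) {
      client.run();
    }
  }
}
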
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Tue Apr 17 12:58:32 2007
@@ -111,7 +111,7 @@
DatanodeRegistration dnRegistration = null;
private String networkLoc;
volatile boolean shouldRun = true;
- Vector receivedBlockList = new Vector();
+ Vector<Block> receivedBlockList = new Vector<Block>();
int xmitsInProgress = 0;
Daemon dataXceiveServer = null;
long blockReportInterval;
@@ -456,7 +456,7 @@
//
// Send newly-received blockids to namenode
//
- blockArray = (Block[]) receivedBlockList.toArray(new Block[receivedBlockList.size()]);
+ blockArray = receivedBlockList.toArray(new Block[receivedBlockList.size()]);
}
}
if( blockArray != null ) {
@@ -799,7 +799,7 @@
//
// Track all the places we've successfully written the block
//
- Vector mirrors = new Vector();
+ Vector<DatanodeInfo> mirrors = new Vector<DatanodeInfo>();
//
// Open local disk out
@@ -998,7 +998,7 @@
//
reply.writeLong(WRITE_COMPLETE);
mirrors.add(curTarget);
- LocatedBlock newLB = new LocatedBlock(b, (DatanodeInfo[]) mirrors.toArray(new DatanodeInfo[mirrors.size()]));
+ LocatedBlock newLB = new LocatedBlock(b, mirrors.toArray(new DatanodeInfo[mirrors.size()]));
newLB.write(reply);
} finally {
reply.close();
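
Several of these hunks drop the cast-on-toArray idiom. Collection.toArray(T[]) is itself a generic method, so passing a correctly typed array yields a typed array back with no cast and no warning. A hedged, self-contained sketch (String stands in for Block):

import java.util.Vector;

class ToArraySketch {
  static String[] snapshot(Vector<String> received) {
    // The generic toArray(T[]) overload returns String[] directly;
    // with a raw Vector this call would have needed a (String[]) cast.
    return received.toArray(new String[received.size()]);
  }
}
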
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java Tue Apr 17 12:58:32 2007
@@ -135,7 +135,7 @@
} else if (parent == null && "/".equals(target)) {
return this;
} else {
- Vector components = new Vector();
+ Vector<String> components = new Vector<String>();
int start = 0;
int slashid = 0;
while (start < target.length() && (slashid = target.indexOf('/', start)) >= 0) {
@@ -151,8 +151,8 @@
/**
*/
- INode getNode(Vector components, int index) {
- if (! name.equals((String) components.elementAt(index))) {
+ INode getNode(Vector<String> components, int index) {
+ if (! name.equals(components.elementAt(index))) {
return null;
}
if (index == components.size()-1) {
@@ -160,7 +160,7 @@
}
// Check with children
- INode child = this.getChild((String)components.elementAt(index+1));
+ INode child = this.getChild(components.elementAt(index+1));
if (child == null) {
return null;
} else {
@@ -225,7 +225,7 @@
* This operation is performed after a node is removed from the tree,
* and we want to GC all the blocks at this node and below.
*/
- void collectSubtreeBlocks(Vector v) {
+ void collectSubtreeBlocks(Vector<Block> v) {
if (blocks != null) {
for (int i = 0; i < blocks.length; i++) {
v.add(blocks[i]);
@@ -296,7 +296,7 @@
/**
*/
- void listContents(Vector v) {
+ void listContents(Vector<INode> v) {
if (parent != null && blocks != null) {
v.add(this);
}
@@ -310,7 +310,8 @@
FSNamesystem namesystem = null;
INode rootDir = new INode("");
- TreeMap activeLocks = new TreeMap();
+ TreeMap<UTF8, TreeSet<UTF8>> activeLocks =
+ new TreeMap<UTF8, TreeSet<UTF8>>();
FSImage fsImage;
boolean ready = false;
// Metrics record
@@ -498,7 +499,7 @@
*/
Block[] setReplication( String src,
short replication,
- Vector oldReplication
+ Vector<Integer> oldReplication
) throws IOException {
waitForReady();
Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication );
@@ -509,10 +510,10 @@
Block[] unprotectedSetReplication( String src,
short replication,
- Vector oldReplication
+ Vector<Integer> oldReplication
) throws IOException {
if( oldReplication == null )
- oldReplication = new Vector();
+ oldReplication = new Vector<Integer>();
oldReplication.setSize(1);
oldReplication.set( 0, new Integer(-1) );
Block[] fileBlocks = null;
@@ -583,13 +584,12 @@
} else {
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+src+" is removed" );
- Vector v = new Vector();
+ Vector<Block> v = new Vector<Block>();
targetNode.collectSubtreeBlocks(v);
- for (Iterator it = v.iterator(); it.hasNext(); ) {
- Block b = (Block) it.next();
+ for (Block b : v) {
namesystem.blocksMap.removeINode(b);
}
- return (Block[]) v.toArray(new Block[v.size()]);
+ return v.toArray(new Block[v.size()]);
}
}
}
@@ -598,9 +598,9 @@
/**
*/
public int obtainLock(UTF8 src, UTF8 holder, boolean exclusive) {
- TreeSet holders = (TreeSet) activeLocks.get(src);
+ TreeSet<UTF8> holders = activeLocks.get(src);
if (holders == null) {
- holders = new TreeSet();
+ holders = new TreeSet<UTF8>();
activeLocks.put(src, holders);
}
if (exclusive && holders.size() > 0) {
@@ -640,13 +640,13 @@
if (targetNode == null) {
return null;
} else {
- Vector contents = new Vector();
+ Vector<INode> contents = new Vector<INode>();
targetNode.listContents(contents);
DFSFileInfo listing[] = new DFSFileInfo[contents.size()];
int i = 0;
- for (Iterator it = contents.iterator(); it.hasNext(); i++) {
- listing[i] = new DFSFileInfo( (INode) it.next() );
+ for (Iterator<INode> it = contents.iterator(); it.hasNext(); i++) {
+ listing[i] = new DFSFileInfo(it.next());
}
return listing;
}
@@ -701,7 +701,7 @@
src = normalizePath(new UTF8(src));
// Use this to collect all the dirs we need to construct
- Vector v = new Vector();
+ Vector<String> v = new Vector<String>();
// The dir itself
v.add(src);
@@ -717,7 +717,7 @@
// the way
int numElts = v.size();
for (int i = numElts - 1; i >= 0; i--) {
- String cur = (String) v.elementAt(i);
+ String cur = v.elementAt(i);
try {
INode inserted = unprotectedMkdir(cur);
if (inserted != null) {
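
In FSDirectory the type parameters are also pushed into method signatures (getNode, collectSubtreeBlocks, listContents, setReplication), so callers and callees agree on the element type and neither side needs casts. A small sketch of generifying a signature (names and element type are illustrative):

import java.util.Vector;

class SignatureSketch {
  // Before: void collect(Vector v) forced callers into raw types or casts.
  // After: the element type is part of the method's contract.
  static void collect(Vector<Long> v) {
    v.add(42L);                      // no unchecked warning on add
  }

  static long firstCollected() {
    Vector<Long> v = new Vector<Long>();
    collect(v);
    return v.elementAt(0);           // typed access, no (Long) cast
  }
}
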
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Tue Apr 17 12:58:32 2007
@@ -412,12 +412,12 @@
*/
private class UnderReplicatedBlocks {
private static final int LEVEL = 3;
- TreeSet<Block>[] priorityQueues = new TreeSet[LEVEL];
+ List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
/* constructor */
UnderReplicatedBlocks() {
for(int i=0; i<LEVEL; i++) {
- priorityQueues[i] = new TreeSet<Block>();
+ priorityQueues.add(new TreeSet<Block>());
}
}
@@ -425,7 +425,7 @@
synchronized int size() {
int size = 0;
for( int i=0; i<LEVEL; i++ ) {
- size += priorityQueues[i].size();
+ size += priorityQueues.get(i).size();
}
return size;
}
@@ -467,7 +467,7 @@
return false;
}
int priLevel = getPriority(block, curReplicas, expectedReplicas);
- if( priorityQueues[priLevel].add(block) ) {
+ if( priorityQueues.get(priLevel).add(block) ) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.add:"
+ block.getBlockName()
@@ -498,7 +498,7 @@
/* remove a block from a under replication queue given a priority*/
private boolean remove(Block block, int priLevel ) {
if( priLevel >= 0 && priLevel < LEVEL
- && priorityQueues[priLevel].remove(block) ) {
+ && priorityQueues.get(priLevel).remove(block) ) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: "
+ "Removing block " + block.getBlockName()
@@ -506,7 +506,7 @@
return true;
} else {
for(int i=0; i<LEVEL; i++) {
- if( i!=priLevel && priorityQueues[i].remove(block) ) {
+ if( i!=priLevel && priorityQueues.get(i).remove(block) ) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: "
+ "Removing block " + block.getBlockName()
@@ -546,7 +546,7 @@
remove(block, oldPri);
}
if( curPri != LEVEL && oldPri != curPri
- && priorityQueues[curPri].add(block)) {
+ && priorityQueues.get(curPri).add(block)) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.update:"
+ block.getBlockName()
@@ -561,33 +561,33 @@
synchronized Iterator<Block> iterator() {
return new Iterator<Block>() {
int level;
- Iterator<Block>[] iterator = new Iterator[LEVEL];
+ List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>();
{
level=0;
for(int i=0; i<LEVEL; i++) {
- iterator[i] = priorityQueues[i].iterator();
+ iterators.add(priorityQueues.get(i).iterator());
}
}
private void update() {
- while( level< LEVEL-1 && !iterator[level].hasNext() ) {
+ while( level< LEVEL-1 && !iterators.get(level).hasNext() ) {
level++;
}
}
public Block next() {
update();
- return iterator[level].next();
+ return iterators.get(level).next();
}
public boolean hasNext() {
update();
- return iterator[level].hasNext();
+ return iterators.get(level).hasNext();
}
public void remove() {
- iterator[level].remove();
+ iterators.get(level).remove();
}
};
}
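
The UnderReplicatedBlocks changes above work around a language restriction rather than simply adding type parameters: Java forbids new TreeSet<Block>[LEVEL], so an array of parameterized sets can only be created as a raw array, which is exactly the unchecked conversion being warned about. Replacing the array with a List<TreeSet<Block>> avoids generic array creation entirely. A minimal sketch of the two options (String stands in for Block, LEVEL is illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

class PriorityQueuesSketch {
  private static final int LEVEL = 3;

  // Not allowed: new TreeSet<String>[LEVEL] is a compile error, and assigning
  // new TreeSet[LEVEL] to a TreeSet<String>[] field is an unchecked conversion.

  // Warning-free alternative: a list of parameterized sets.
  private final List<TreeSet<String>> priorityQueues =
      new ArrayList<TreeSet<String>>();

  PriorityQueuesSketch() {
    for (int i = 0; i < LEVEL; i++) {
      priorityQueues.add(new TreeSet<String>());
    }
  }
}
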
@@ -2629,13 +2629,13 @@
StringBuffer blockList = new StringBuffer();
for (int i = 0; i < sendBlock.size(); i++) {
blockList.append(' ');
- Block block = (Block) sendBlock.get(i);
+ Block block = sendBlock.get(i);
blockList.append(block.getBlockName());
}
NameNode.stateChangeLog.debug("BLOCK* NameSystem.blockToInvalidate: "
+"ask "+nodeID.getName()+" to delete " + blockList );
}
- return (Block[]) sendBlock.toArray(new Block[sendBlock.size()]);
+ return sendBlock.toArray(new Block[sendBlock.size()]);
}
/*
@@ -4042,6 +4042,7 @@
* @author Milind Bhandarkar
*/
public static class FsckServlet extends HttpServlet {
+ @SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
@@ -4069,6 +4070,7 @@
* @author Dhruba Borthakur
*/
public static class GetImageServlet extends HttpServlet {
+ @SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java Tue Apr 17 12:58:32 2007
@@ -48,7 +48,7 @@
}
}
public DatanodeInfo bestNode(LocatedBlock blk) throws IOException {
- TreeSet deadNodes = new TreeSet();
+ TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
DatanodeInfo chosenNode = null;
int failures = 0;
Socket s = null;
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Tue Apr 17 12:58:32 2007
@@ -308,7 +308,7 @@
OutputStream fos) throws Exception {
int failures = 0;
InetSocketAddress targetAddr = null;
- TreeSet deadNodes = new TreeSet();
+ TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
Socket s = null;
DataInputStream in = null;
DataOutputStream out = null;
@@ -400,7 +400,7 @@
*/
Random r = new Random();
private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
- TreeSet deadNodes) throws IOException {
+ TreeSet<DatanodeInfo> deadNodes) throws IOException {
if ((nodes == null) ||
(nodes.length - deadNodes.size() < 1)) {
throw new IOException("No live nodes contain current block");
@@ -463,7 +463,7 @@
* @author Andrzej Bialecki
*/
public class FsckResult {
- private ArrayList missingIds = new ArrayList();
+ private ArrayList<String> missingIds = new ArrayList<String>();
private long missingSize = 0L;
private long corruptFiles = 0L;
private long overReplicatedBlocks = 0L;
@@ -490,7 +490,7 @@
}
/** Return a list of missing block names (as list of Strings). */
- public ArrayList getMissingIds() {
+ public ArrayList<String> getMissingIds() {
return missingIds;
}
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java Tue Apr 17 12:58:32 2007
@@ -425,6 +425,7 @@
* @author Dhruba Borthakur
*/
public static class GetImageServlet extends HttpServlet {
+ @SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
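
The servlet methods get @SuppressWarnings("unchecked") rather than a code change, presumably because the warnings originate in pre-generics Servlet API calls inside doGet (raw Map or Enumeration return types in the Servlet 2.x API) that cannot be parameterized from the caller's side. The annotation is scoped to the annotated method only, so it does not hide warnings elsewhere in the class. A hedged sketch of the idiom, assuming the Servlet 2.x API on the classpath (the method and its body are illustrative, not from these servlets):

import java.util.Enumeration;
import javax.servlet.http.HttpServletRequest;

class SuppressSketch {
  // Scoped suppression: only this method's unchecked warnings are silenced.
  @SuppressWarnings("unchecked")
  static void listParams(HttpServletRequest request) {
    // getParameterNames() returns a raw Enumeration in the Servlet 2.x API,
    // so the cast below is unavoidably unchecked.
    Enumeration<String> names = (Enumeration<String>) request.getParameterNames();
    while (names.hasMoreElements()) {
      System.out.println(names.nextElement());
    }
  }
}
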