You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@commons.apache.org by gg...@apache.org on 2012/09/11 21:47:51 UTC
svn commit: r1383583 -
/commons/proper/csv/trunk/src/main/java/org/apache/commons/csv/CSVLexer.java
Author: ggregory
Date: Tue Sep 11 19:47:51 2012
New Revision: 1383583
URL: http://svn.apache.org/viewvc?rev=1383583&view=rev
Log:
Rename parameter name from 'tkn' to 'token'.
Modified:
commons/proper/csv/trunk/src/main/java/org/apache/commons/csv/CSVLexer.java
Modified: commons/proper/csv/trunk/src/main/java/org/apache/commons/csv/CSVLexer.java
URL: http://svn.apache.org/viewvc/commons/proper/csv/trunk/src/main/java/org/apache/commons/csv/CSVLexer.java?rev=1383583&r1=1383582&r2=1383583&view=diff
==============================================================================
--- commons/proper/csv/trunk/src/main/java/org/apache/commons/csv/CSVLexer.java (original)
+++ commons/proper/csv/trunk/src/main/java/org/apache/commons/csv/CSVLexer.java Tue Sep 11 19:47:51 2012
@@ -33,14 +33,14 @@ class CSVLexer extends Lexer {
* <p/>
* A token corresponds to a term, a record change or an end-of-file indicator.
*
- * @param tkn
+ * @param token
* an existing Token object to reuse. The caller is responsible to initialize the Token.
* @return the next token found
* @throws java.io.IOException
* on stream access error
*/
@Override
- Token nextToken(Token tkn) throws IOException {
+ Token nextToken(Token token) throws IOException {
// get the last read char (required for empty line detection)
int lastChar = in.readAgain();
@@ -62,29 +62,29 @@ class CSVLexer extends Lexer {
eol = isEndOfLine(c);
// reached end of file without any content (empty line at the end)
if (isEndOfFile(c)) {
- tkn.type = EOF;
+ token.type = EOF;
// don't set token.isReady here because no content
- return tkn;
+ return token;
}
}
}
// did we reach eof during the last iteration already ? EOF
if (isEndOfFile(lastChar) || (!isDelimiter(lastChar) && isEndOfFile(c))) {
- tkn.type = EOF;
+ token.type = EOF;
// don't set token.isReady here because no content
- return tkn;
+ return token;
}
if (isStartOfLine(lastChar) && isCommentStart(c)) {
String comment = in.readLine().trim();
- tkn.content.append(comment);
- tkn.type = COMMENT;
- return tkn;
+ token.content.append(comment);
+ token.type = COMMENT;
+ return token;
}
// important: make sure a new char gets consumed in each iteration
- while (tkn.type == INVALID) {
+ while (token.type == INVALID) {
// ignore whitespaces at beginning of a token
if (surroundingSpacesIgnored) {
while (isWhitespace(c) && !eol) {
@@ -96,26 +96,26 @@ class CSVLexer extends Lexer {
// ok, start of token reached: encapsulated, or token
if (isDelimiter(c)) {
// empty token return TOKEN("")
- tkn.type = TOKEN;
+ token.type = TOKEN;
} else if (eol) {
// empty token return EORECORD("")
// noop: token.content.append("");
- tkn.type = EORECORD;
+ token.type = EORECORD;
} else if (isEncapsulator(c)) {
// consume encapsulated token
- encapsulatedTokenLexer(tkn);
+ encapsulatedTokenLexer(token);
} else if (isEndOfFile(c)) {
// end of file return EOF()
// noop: token.content.append("");
- tkn.type = EOF;
- tkn.isReady = true; // there is data at EOF
+ token.type = EOF;
+ token.isReady = true; // there is data at EOF
} else {
// next token must be a simple token
// add removed blanks when not ignoring whitespace chars...
- simpleTokenLexer(tkn, c);
+ simpleTokenLexer(token, c);
}
}
- return tkn;
+ return token;
}
/**