Author: ggregory
Date: Sun Jul 31 18:14:57 2005
New Revision: 226712
URL: http://svn.apache.org/viewcvs?rev=226712&view=rev
Log:
Better unit test coverage for StrTokenizer.java.
Modified:
jakarta/commons/proper/lang/trunk/src/java/org/apache/commons/lang/text/StrTokenizer.java
jakarta/commons/proper/lang/trunk/src/test/org/apache/commons/lang/text/StrTokenizerTest.java
Modified:
jakarta/commons/proper/lang/trunk/src/java/org/apache/commons/lang/text/StrTokenizer.java
URL:
http://svn.apache.org/viewcvs/jakarta/commons/proper/lang/trunk/src/java/org/apache/commons/lang/text/StrTokenizer.java?rev=226712&r1=226711&r2=226712&view=diff
==============================================================================
---
jakarta/commons/proper/lang/trunk/src/java/org/apache/commons/lang/text/StrTokenizer.java
(original)
+++
jakarta/commons/proper/lang/trunk/src/java/org/apache/commons/lang/text/StrTokenizer.java
Sun Jul 31 18:14:57 2005
@@ -224,6 +224,15 @@
//-----------------------------------------------------------------------
/**
+ * Returns a clone of <code>CSV_TOKENIZER_PROTOTYPE</code>.
+ *
+ * @return a clone of <code>CSV_TOKENIZER_PROTOTYPE</code>.
+ */
+ private static StrTokenizer getCSVClone() {
+ return (StrTokenizer) CSV_TOKENIZER_PROTOTYPE.clone();
+ }
+
+ /**
* Gets a new tokenizer instance which parses Comma Separated Value strings
* initializing it with the given input. The default for CSV processing
* will be trim whitespace from both ends (which can be overridden with
@@ -233,7 +242,7 @@
* @return a new tokenizer instance which parses Comma Separated Value
strings
*/
public static StrTokenizer getCSVInstance() {
- return (StrTokenizer)CSV_TOKENIZER_PROTOTYPE.clone();
+ return getCSVClone();
}
/**
@@ -246,7 +255,7 @@
* @return a new tokenizer instance which parses Comma Separated Value
strings
*/
public static StrTokenizer getCSVInstance(String input) {
- StrTokenizer tok = (StrTokenizer)(CSV_TOKENIZER_PROTOTYPE.clone());
+ StrTokenizer tok = getCSVClone();
tok.reset(input);
return tok;
}
@@ -261,12 +270,22 @@
* @return a new tokenizer instance which parses Comma Separated Value
strings
*/
public static StrTokenizer getCSVInstance(char[] input) {
- StrTokenizer tok = (StrTokenizer)(CSV_TOKENIZER_PROTOTYPE.clone());
+ StrTokenizer tok = getCSVClone();
tok.reset(input);
return tok;
}
/**
+ * Returns a clone of <code>TSV_TOKENIZER_PROTOTYPE</code>.
+ *
+ * @return a clone of <code>TSV_TOKENIZER_PROTOTYPE</code>.
+ */
+ private static StrTokenizer getTSVClone() {
+ return (StrTokenizer) TSV_TOKENIZER_PROTOTYPE.clone();
+ }
+
+
+ /**
* Gets a new tokenizer instance which parses Tab Separated Value strings.
* The default for CSV processing will be trim whitespace from both ends
* (which can be overridden with the setTrimmer method).
@@ -275,7 +294,7 @@
* @return a new tokenizer instance which parses Tab Separated Value
strings.
*/
public static StrTokenizer getTSVInstance() {
- return (StrTokenizer)TSV_TOKENIZER_PROTOTYPE.clone();
+ return getTSVClone();
}
/**
@@ -286,7 +305,7 @@
* @return a new tokenizer instance which parses Tab Separated Value
strings.
*/
public static StrTokenizer getTSVInstance(String input) {
- StrTokenizer tok = (StrTokenizer)(TSV_TOKENIZER_PROTOTYPE.clone());
+ StrTokenizer tok = getTSVClone();
tok.reset(input);
return tok;
}
@@ -299,7 +318,7 @@
* @return a new tokenizer instance which parses Tab Separated Value
strings.
*/
public static StrTokenizer getTSVInstance(char[] input) {
- StrTokenizer tok = (StrTokenizer)(TSV_TOKENIZER_PROTOTYPE.clone());
+ StrTokenizer tok = getTSVClone();
tok.reset(input);
return tok;
}
Modified:
jakarta/commons/proper/lang/trunk/src/test/org/apache/commons/lang/text/StrTokenizerTest.java
URL:
http://svn.apache.org/viewcvs/jakarta/commons/proper/lang/trunk/src/test/org/apache/commons/lang/text/StrTokenizerTest.java?rev=226712&r1=226711&r2=226712&view=diff
==============================================================================
---
jakarta/commons/proper/lang/trunk/src/test/org/apache/commons/lang/text/StrTokenizerTest.java
(original)
+++
jakarta/commons/proper/lang/trunk/src/test/org/apache/commons/lang/text/StrTokenizerTest.java
Sun Jul 31 18:14:57 2005
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.commons.lang.text;
import junit.framework.Test;
@@ -24,17 +25,17 @@
/**
* Unit test for Tokenizer.
- *
+ *
* @author Matthew Inger
*/
public class StrTokenizerTest extends TestCase {
-
- /**
- * JUnit constructor.
- * @param name
- */
- public StrTokenizerTest(String name) {
- super(name);
+
+ private static final String CSV_SIMPLE_FIXTURE = "A,b,c";
+
+ private static final String TSV_SIMPLE_FIXTURE = "A\tb\tc";
+
+ public static void main(String[] args) {
+ TestRunner.run(suite());
}
public static Test suite() {
@@ -43,12 +44,21 @@
return suite;
}
+ /**
+ * JUnit constructor.
+ *
+ * @param name
+ */
+ public StrTokenizerTest(String name) {
+ super(name);
+ }
- public static void main(String[] args) {
- TestRunner.run(suite());
+ private void checkClone(StrTokenizer tokenizer) {
+ assertFalse(StrTokenizer.getCSVInstance() == tokenizer);
+ assertFalse(StrTokenizer.getTSVInstance() == tokenizer);
}
- //-----------------------------------------------------------------------
+ // -----------------------------------------------------------------------
public void test1() {
String input = "a;b;c;\"d;\"\"e\";f; ; ;";
@@ -57,31 +67,18 @@
tok.setQuoteChar('"');
tok.setIgnoredMatcher(StrTokenizer.TRIM_MATCHER);
tok.setIgnoreEmptyTokens(false);
- String tokens [] = tok.getAllTokens();
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "b",
- "c",
- "d;\"e",
- "f",
- "",
- "",
- "",
- };
+ String expected[] = new String[]{"a", "b", "c", "d;\"e", "f", "", "",
"",};
assertTrue(tokens.length == expected.length);
for (int i = 0; i < expected.length; i++) {
- assertTrue("token[" + i + "] was '" + tokens[i]
- + "' but was expected to be '" + expected[i]
- + "'",
+ assertTrue("token[" + i + "] was '" + tokens[i] + "' but was
expected to be '" + expected[i] + "'",
ObjectUtils.equals(expected[i], tokens[i]));
}
}
-
public void test2() {
String input = "a;b;c ;\"d;\"\"e\";f; ; ;";
@@ -90,31 +87,18 @@
tok.setQuoteChar('"');
tok.setIgnoredMatcher(StrTokenizer.NONE_MATCHER);
tok.setIgnoreEmptyTokens(false);
- String tokens [] = tok.getAllTokens();
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "b",
- "c ",
- "d;\"e",
- "f",
- " ",
- " ",
- "",
- };
+ String expected[] = new String[]{"a", "b", "c ", "d;\"e", "f", " ", "
", "",};
assertTrue(tokens.length == expected.length);
for (int i = 0; i < expected.length; i++) {
- assertTrue("token[" + i + "] was '" + tokens[i]
- + "' but was expected to be '" + expected[i]
- + "'",
+ assertTrue("token[" + i + "] was '" + tokens[i] + "' but was
expected to be '" + expected[i] + "'",
ObjectUtils.equals(expected[i], tokens[i]));
}
}
-
public void test3() {
String input = "a;b; c;\"d;\"\"e\";f; ; ;";
@@ -123,31 +107,18 @@
tok.setQuoteChar('"');
tok.setIgnoredMatcher(StrTokenizer.NONE_MATCHER);
tok.setIgnoreEmptyTokens(false);
- String tokens [] = tok.getAllTokens();
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "b",
- " c",
- "d;\"e",
- "f",
- " ",
- " ",
- "",
- };
+ String expected[] = new String[]{"a", "b", " c", "d;\"e", "f", " ", "
", "",};
assertTrue(tokens.length == expected.length);
for (int i = 0; i < expected.length; i++) {
- assertTrue("token[" + i + "] was '" + tokens[i]
- + "' but was expected to be '" + expected[i]
- + "'",
+ assertTrue("token[" + i + "] was '" + tokens[i] + "' but was
expected to be '" + expected[i] + "'",
ObjectUtils.equals(expected[i], tokens[i]));
}
}
-
public void test4() {
String input = "a;b; c;\"d;\"\"e\";f; ; ;";
@@ -156,28 +127,18 @@
tok.setQuoteChar('"');
tok.setIgnoredMatcher(StrTokenizer.TRIM_MATCHER);
tok.setIgnoreEmptyTokens(true);
- String tokens [] = tok.getAllTokens();
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "b",
- "c",
- "d;\"e",
- "f",
- };
+ String expected[] = new String[]{"a", "b", "c", "d;\"e", "f",};
assertTrue(tokens.length == expected.length);
for (int i = 0; i < expected.length; i++) {
- assertTrue("token[" + i + "] was '" + tokens[i]
- + "' but was expected to be '" + expected[i]
- + "'",
+ assertTrue("token[" + i + "] was '" + tokens[i] + "' but was
expected to be '" + expected[i] + "'",
ObjectUtils.equals(expected[i], tokens[i]));
}
}
-
public void test5() {
String input = "a;b; c;\"d;\"\"e\";f; ; ;";
@@ -187,31 +148,18 @@
tok.setIgnoredMatcher(StrTokenizer.TRIM_MATCHER);
tok.setIgnoreEmptyTokens(false);
tok.setEmptyTokenAsNull(true);
- String tokens [] = tok.getAllTokens();
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "b",
- "c",
- "d;\"e",
- "f",
- null,
- null,
- null,
- };
+ String expected[] = new String[]{"a", "b", "c", "d;\"e", "f", null,
null, null,};
assertTrue(tokens.length == expected.length);
for (int i = 0; i < expected.length; i++) {
- assertTrue("token[" + i + "] was '" + tokens[i]
- + "' but was expected to be '" + expected[i]
- + "'",
+ assertTrue("token[" + i + "] was '" + tokens[i] + "' but was
expected to be '" + expected[i] + "'",
ObjectUtils.equals(expected[i], tokens[i]));
}
}
-
public void test6() {
String input = "a;b; c;\"d;\"\"e\";f; ; ;";
@@ -220,20 +168,10 @@
tok.setQuoteChar('"');
tok.setIgnoredMatcher(StrTokenizer.TRIM_MATCHER);
tok.setIgnoreEmptyTokens(false);
-// tok.setTreatingEmptyAsNull(true);
- String tokens [] = tok.getAllTokens();
+ // tok.setTreatingEmptyAsNull(true);
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "b",
- " c",
- "d;\"e",
- "f",
- null,
- null,
- null,
- };
+ String expected[] = new String[]{"a", "b", " c", "d;\"e", "f", null,
null, null,};
int nextCount = 0;
while (tok.hasNext()) {
@@ -249,17 +187,14 @@
assertTrue(tokens.length == expected.length);
- assertTrue("could not cycle through entire token list"
- + " using the 'hasNext' and 'next' methods",
+ assertTrue("could not cycle through entire token list" + " using the
'hasNext' and 'next' methods",
nextCount == expected.length);
- assertTrue("could not cycle through entire token list"
- + " using the 'hasPrevious' and 'previous' methods",
+ assertTrue("could not cycle through entire token list" + " using the
'hasPrevious' and 'previous' methods",
prevCount == expected.length);
}
-
public void test7() {
String input = "a b c \"d e\" f ";
@@ -268,31 +203,18 @@
tok.setQuoteMatcher(StrTokenizer.DOUBLE_QUOTE_MATCHER);
tok.setIgnoredMatcher(StrTokenizer.NONE_MATCHER);
tok.setIgnoreEmptyTokens(false);
- String tokens [] = tok.getAllTokens();
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "",
- "",
- "b",
- "c",
- "d e",
- "f",
- "",
- };
+ String expected[] = new String[]{"a", "", "", "b", "c", "d e", "f",
"",};
assertTrue(tokens.length == expected.length);
for (int i = 0; i < expected.length; i++) {
- assertTrue("token[" + i + "] was '" + tokens[i]
- + "' but was expected to be '" + expected[i]
- + "'",
+ assertTrue("token[" + i + "] was '" + tokens[i] + "' but was
expected to be '" + expected[i] + "'",
ObjectUtils.equals(expected[i], tokens[i]));
}
}
-
public void test8() {
String input = "a b c \"d e\" f ";
@@ -301,22 +223,13 @@
tok.setQuoteMatcher(StrTokenizer.DOUBLE_QUOTE_MATCHER);
tok.setIgnoredMatcher(StrTokenizer.NONE_MATCHER);
tok.setIgnoreEmptyTokens(true);
- String tokens [] = tok.getAllTokens();
+ String tokens[] = tok.getAllTokens();
- String expected[] = new String[]
- {
- "a",
- "b",
- "c",
- "d e",
- "f",
- };
+ String expected[] = new String[]{"a", "b", "c", "d e", "f",};
assertTrue(tokens.length == expected.length);
for (int i = 0; i < expected.length; i++) {
- assertTrue("token[" + i + "] was '" + tokens[i]
- + "' but was expected to be '" + expected[i]
- + "'",
+ assertTrue("token[" + i + "] was '" + tokens[i] + "' but was
expected to be '" + expected[i] + "'",
ObjectUtils.equals(expected[i], tokens[i]));
}
@@ -329,7 +242,7 @@
assertEquals("b", tok.next());
assertEquals("c", tok.next());
}
-
+
public void testBasic2() {
String input = "a \nb\fc";
StrTokenizer tok = new StrTokenizer(input);
@@ -337,7 +250,7 @@
assertEquals("b", tok.next());
assertEquals("c", tok.next());
}
-
+
public void testBasic3() {
String input = "a \nb\u0001\fc";
StrTokenizer tok = new StrTokenizer(input);
@@ -345,7 +258,7 @@
assertEquals("b\u0001", tok.next());
assertEquals("c", tok.next());
}
-
+
public void testBasic4() {
String input = "a \"b\" c";
StrTokenizer tok = new StrTokenizer(input);
@@ -353,15 +266,7 @@
assertEquals("\"b\"", tok.next());
assertEquals("c", tok.next());
}
-
- public void testBasicQuoted1() {
- String input = "a \"b\" c";
- StrTokenizer tok = new StrTokenizer(input, ' ', '"');
- assertEquals("a", tok.next());
- assertEquals("b", tok.next());
- assertEquals("c", tok.next());
- }
-
+
public void testBasicDelim1() {
String input = "a:b:c";
StrTokenizer tok = new StrTokenizer(input, ':');
@@ -369,13 +274,13 @@
assertEquals("b", tok.next());
assertEquals("c", tok.next());
}
-
+
public void testBasicDelim2() {
String input = "a:b:c";
StrTokenizer tok = new StrTokenizer(input, ',');
assertEquals("a:b:c", tok.next());
}
-
+
public void testBasicEmpty1() {
String input = "a b c";
StrTokenizer tok = new StrTokenizer(input);
@@ -385,7 +290,7 @@
assertEquals("b", tok.next());
assertEquals("c", tok.next());
}
-
+
public void testBasicEmpty2() {
String input = "a b c";
StrTokenizer tok = new StrTokenizer(input);
@@ -396,16 +301,67 @@
assertEquals("b", tok.next());
assertEquals("c", tok.next());
}
-
+
+ public void testBasicQuoted1() {
+ String input = "a \"b\" c";
+ StrTokenizer tok = new StrTokenizer(input, ' ', '"');
+ assertEquals("a", tok.next());
+ assertEquals("b", tok.next());
+ assertEquals("c", tok.next());
+ }
+
+ public void testCSV(String data) {
+ this.testXSVAbc(StrTokenizer.getCSVInstance(data));
+ this.testXSVAbc(StrTokenizer.getCSVInstance(data.toCharArray()));
+ }
+
+ public void testCSVEmpty() {
+ this.testEmpty(StrTokenizer.getCSVInstance());
+ this.testEmpty(StrTokenizer.getCSVInstance(""));
+ }
+
+ public void testCSVSimple() {
+ this.testCSV(CSV_SIMPLE_FIXTURE);
+ }
+
+ public void testCSVSimpleNeedsTrim() {
+ this.testCSV(" " + CSV_SIMPLE_FIXTURE);
+ this.testCSV(" \n\t " + CSV_SIMPLE_FIXTURE);
+ this.testCSV(" \n " + CSV_SIMPLE_FIXTURE + "\n\n\r");
+ }
+
+ void testEmpty(StrTokenizer tokenizer) {
+ this.checkClone(tokenizer);
+ assertEquals(false, tokenizer.hasNext());
+ assertEquals(false, tokenizer.hasPrevious());
+ assertEquals(null, tokenizer.next());
+ assertEquals(null, tokenizer.nextToken());
+ assertEquals(0, tokenizer.size());
+ }
+
public void testGetContent() {
String input = "a b c \"d e\" f ";
StrTokenizer tok = new StrTokenizer(input);
assertSame(input, tok.getContent());
-
+
tok = new StrTokenizer(input.toCharArray());
assertEquals(input, tok.getContent());
}
+ public void testMatcher() {
+ assertEquals(1, StrTokenizer.SPACE_MATCHER.isMatch(new char[]{' '}, 1,
0));
+ assertEquals(0, StrTokenizer.SPACE_MATCHER.isMatch(new char[]{'\n'},
1, 0));
+ assertEquals(0, StrTokenizer.SPACE_MATCHER.isMatch(new
char[]{'\u0001'}, 1, 0));
+
+ assertEquals(1, StrTokenizer.TRIM_MATCHER.isMatch(new char[]{' '}, 1,
0));
+ assertEquals(1, StrTokenizer.TRIM_MATCHER.isMatch(new char[]{'\n'}, 1,
0));
+ assertEquals(1, StrTokenizer.TRIM_MATCHER.isMatch(new
char[]{'\u0001'}, 1, 0));
+
+ assertEquals(1, StrTokenizer.SPLIT_MATCHER.isMatch(new char[]{' '}, 1,
0));
+ assertEquals(1, StrTokenizer.SPLIT_MATCHER.isMatch(new char[]{'\n'},
1, 0));
+ assertEquals(0, StrTokenizer.SPLIT_MATCHER.isMatch(new
char[]{'\u0001'}, 1, 0));
+ }
+
public void testReset() {
String input = "a b c";
StrTokenizer tok = new StrTokenizer(input);
@@ -423,19 +379,40 @@
assertEquals("f", tok.next());
assertEquals("g", tok.next());
}
-
- public void testMatcher() {
- assertEquals(1, StrTokenizer.SPACE_MATCHER.isMatch(new char[] {' '},
1, 0));
- assertEquals(0, StrTokenizer.SPACE_MATCHER.isMatch(new char[] {'\n'},
1, 0));
- assertEquals(0, StrTokenizer.SPACE_MATCHER.isMatch(new char[]
{'\u0001'}, 1, 0));
-
- assertEquals(1, StrTokenizer.TRIM_MATCHER.isMatch(new char[] {' '}, 1,
0));
- assertEquals(1, StrTokenizer.TRIM_MATCHER.isMatch(new char[] {'\n'},
1, 0));
- assertEquals(1, StrTokenizer.TRIM_MATCHER.isMatch(new char[]
{'\u0001'}, 1, 0));
-
- assertEquals(1, StrTokenizer.SPLIT_MATCHER.isMatch(new char[] {' '},
1, 0));
- assertEquals(1, StrTokenizer.SPLIT_MATCHER.isMatch(new char[] {'\n'},
1, 0));
- assertEquals(0, StrTokenizer.SPLIT_MATCHER.isMatch(new char[]
{'\u0001'}, 1, 0));
+
+ public void testTSV() {
+ this.testXSVAbc(StrTokenizer.getTSVInstance(TSV_SIMPLE_FIXTURE));
+
this.testXSVAbc(StrTokenizer.getTSVInstance(TSV_SIMPLE_FIXTURE.toCharArray()));
+ }
+
+ public void testTSVEmpty() {
+ this.testEmpty(StrTokenizer.getCSVInstance());
+ this.testEmpty(StrTokenizer.getCSVInstance(""));
+ }
+
+ void testXSVAbc(StrTokenizer tokenizer) {
+ this.checkClone(tokenizer);
+ assertEquals(-1, tokenizer.previousIndex());
+ assertEquals(0, tokenizer.nextIndex());
+ assertEquals(null, tokenizer.previousToken());
+ assertEquals("A", tokenizer.nextToken());
+ assertEquals(1, tokenizer.nextIndex());
+ assertEquals("b", tokenizer.nextToken());
+ assertEquals(2, tokenizer.nextIndex());
+ assertEquals("c", tokenizer.nextToken());
+ assertEquals(3, tokenizer.nextIndex());
+ assertEquals(null, tokenizer.nextToken());
+ assertEquals(3, tokenizer.nextIndex());
+ assertEquals("c", tokenizer.previousToken());
+ assertEquals(2, tokenizer.nextIndex());
+ assertEquals("b", tokenizer.previousToken());
+ assertEquals(1, tokenizer.nextIndex());
+ assertEquals("A", tokenizer.previousToken());
+ assertEquals(0, tokenizer.nextIndex());
+ assertEquals(null, tokenizer.previousToken());
+ assertEquals(0, tokenizer.nextIndex());
+ assertEquals(-1, tokenizer.previousIndex());
+ assertEquals(3, tokenizer.size());
}
-
+
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]