http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b515271d/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs 
b/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs
new file mode 100644
index 0000000..61e7bf1
--- /dev/null
+++ b/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs
@@ -0,0 +1,1177 @@
+using Icu.Collation;
+using Lucene.Net.Analysis;
+using Lucene.Net.Analysis.TokenAttributes;
+using Lucene.Net.Benchmarks.ByTask.Feeds;
+using Lucene.Net.Benchmarks.ByTask.Stats;
+using Lucene.Net.Benchmarks.ByTask.Tasks;
+using Lucene.Net.Collation;
+using Lucene.Net.Facet.Taxonomy;
+using Lucene.Net.Index;
+using Lucene.Net.Search;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.IO;
+using System.Text;
+
+namespace Lucene.Net.Benchmarks.ByTask
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    /// <summary>
+    /// Test very simply that perf tasks - simple algorithms - are doing what they should.
+    /// </summary>
+    [SuppressCodecs("Lucene3x")]
+    public class TestPerfTasksLogic : BenchmarkTestCase
+    {
+        //public override void SetUp()
+        //{
+        //    base.SetUp();
+        //    copyToWorkDir("reuters.first20.lines.txt");
+        //    copyToWorkDir("test-mapping-ISOLatin1Accent-partial.txt");
+        //}
+
+        public override void BeforeClass()
+        {
+            base.BeforeClass();
+            copyToWorkDir("reuters.first20.lines.txt");
+            copyToWorkDir("test-mapping-ISOLatin1Accent-partial.txt");
+        }
+
+        /**
+         * Test index creation logic
+         */
+        [Test]
+        public void TestIndexAndSearchTasks()
+        {
+            // 1. alg definition (required in every "logic" test)
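+            // Alg syntax in brief: "{ X } : N" is a sequential sequence run N
+            // times, "[ X ] : N" runs X in N parallel threads, and a sequence
+            // closed with ">" instead of "}" or "]" keeps its sub-task stats
+            // uncollapsed in reports (per the upstream benchmark docs).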
+            String[] algLines = {
+                "ResetSystemErase",
+                "CreateIndex",
+                "{ AddDoc } : 1000",
+                "ForceMerge(1)",
+                "CloseIndex",
+                "OpenReader",
+                "{ CountingSearchTest } : 200",
+                "CloseReader",
+                "[ CountingSearchTest > : 70",
+                "[ CountingSearchTest > : 9",
+            };
+
+            // 2. we test this value later
+            CountingSearchTestTask.numSearches = 0;
+
+            // 3. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 4. test specific checks after the benchmark run completed.
+            assertEquals("TestSearchTask was supposed to be called!", 279, 
CountingSearchTestTask.numSearches);
+            assertTrue("Index does not exist?...!", 
DirectoryReader.IndexExists(benchmark.RunData.Directory));
+            // now we should be able to open the index for write. 
+            IndexWriter iw = new IndexWriter(benchmark.RunData.Directory,
+                new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+                .SetOpenMode(OpenMode.APPEND));
+            iw.Dispose();
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            assertEquals("1000 docs were added to the index, this is what we 
expect to find!", 1000, ir.NumDocs);
+            ir.Dispose();
+        }
+
+        /**
+         * Test timed sequence task.
+         */
+        [Test]
+        public void TestTimedSearchTask()
+        {
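+            // Note: a time-based repetition such as ": .5s" below repeats the
+            // sequence for roughly that elapsed time instead of a fixed count.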
+            String[] algLines = {
+                "log.step=100000",
+                "ResetSystemErase",
+                "CreateIndex",
+                "{ AddDoc } : 100",
+                "ForceMerge(1)",
+                "CloseIndex",
+                "OpenReader",
+                "{ CountingSearchTest } : .5s",
+                "CloseReader",
+            };
+
+            CountingSearchTestTask.numSearches = 0;
+            execBenchmark(algLines);
+            assertTrue(CountingSearchTestTask.numSearches > 0);
+            long elapsed = CountingSearchTestTask.prevLastMillis - CountingSearchTestTask.startMillis;
+            assertTrue("elapsed time was " + elapsed + " msec", elapsed <= 
1500);
+        }
+
+        // disabled until we fix BG thread prio -- this test
+        // causes build to hang
+        [Test]
+        public void TestBGSearchTaskThreads()
+        {
+            String[] algLines = {
+                "log.time.step.msec = 100",
+                "log.step=100000",
+                "ResetSystemErase",
+                "CreateIndex",
+                "{ AddDoc } : 1000",
+                "ForceMerge(1)",
+                "CloseIndex",
+                "OpenReader",
+                "{",
+                "  [ \"XSearch\" { CountingSearchTest > : * ] : 2 &-1",
+                "  Wait(0.5)",
+                "}",
+                "CloseReader",
+                "RepSumByPref X"
+            };
+
+            CountingSearchTestTask.numSearches = 0;
+            execBenchmark(algLines);
+
+            // NOTE: cannot assert this, because on a super-slow
+            // system, it could be after waiting 0.5 seconds that
+            // the search threads hadn't yet succeeded in starting
+            // up and then they start up and do no searching:
+            //assertTrue(CountingSearchTestTask.numSearches > 0);
+        }
+
+        [Test]
+        public void TestHighlighting()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "doc.stored=true",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "query.maker=" + 
typeof(ReutersQueryMaker).AssemblyQualifiedName,
+                "ResetSystemErase",
+                "CreateIndex",
+                "{ AddDoc } : 100",
+                "ForceMerge(1)",
+                "CloseIndex",
+                "OpenReader",
+                "{ 
CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body])
 } : 200",
+                "CloseReader",
+            };
+
+            // 2. we test this value later
+            CountingHighlighterTestTask.numHighlightedResults = 0;
+            CountingHighlighterTestTask.numDocsRetrieved = 0;
+            // 3. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 4. test specific checks after the benchmark run completed.
+            assertEquals("TestSearchTask was supposed to be called!", 92, 
CountingHighlighterTestTask.numDocsRetrieved);
+            //pretty hard to figure out a priori how many docs are going to have highlighted fragments returned, but we can never have more than the number of docs
+            //we probably should use a different doc/query maker, but...
+            assertTrue("TestSearchTask was supposed to be called!", 
CountingHighlighterTestTask.numDocsRetrieved >= 
CountingHighlighterTestTask.numHighlightedResults && 
CountingHighlighterTestTask.numHighlightedResults > 0);
+
+            assertTrue("Index does not exist?...!", 
DirectoryReader.IndexExists(benchmark.RunData.Directory));
+            // now we should be able to open the index for write.
+            IndexWriter iw = new IndexWriter(benchmark.RunData.Directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+            iw.Dispose();
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            assertEquals("100 docs were added to the index, this is what we 
expect to find!", 100, ir.NumDocs);
+            ir.Dispose();
+        }
+
+        [Test]
+        public void TestHighlightingTV()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "doc.stored=true",//doc storage is required in order to have 
text to highlight
+                "doc.term.vector=true",
+                "doc.term.vector.offsets=true",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "query.maker=" + 
typeof(ReutersQueryMaker).AssemblyQualifiedName,
+                "ResetSystemErase",
+                "CreateIndex",
+                "{ AddDoc } : 1000",
+                "ForceMerge(1)",
+                "CloseIndex",
+                "OpenReader",
+                "{ 
CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body])
 } : 200",
+                "CloseReader",
+            };
+
+            // 2. we test this value later
+            CountingHighlighterTestTask.numHighlightedResults = 0;
+            CountingHighlighterTestTask.numDocsRetrieved = 0;
+            // 3. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 4. test specific checks after the benchmark run completed.
+            assertEquals("TestSearchTask was supposed to be called!", 92, 
CountingHighlighterTestTask.numDocsRetrieved);
+            //pretty hard to figure out a priori how many docs are going to have highlighted fragments returned, but we can never have more than the number of docs
+            //we probably should use a different doc/query maker, but...
+            assertTrue("TestSearchTask was supposed to be called!", 
CountingHighlighterTestTask.numDocsRetrieved >= 
CountingHighlighterTestTask.numHighlightedResults && 
CountingHighlighterTestTask.numHighlightedResults > 0);
+
+            assertTrue("Index does not exist?...!", 
DirectoryReader.IndexExists(benchmark.RunData.Directory));
+            // now we should be able to open the index for write.
+            IndexWriter iw = new IndexWriter(benchmark.RunData.Directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+            iw.Dispose();
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            assertEquals("1000 docs were added to the index, this is what we 
expect to find!", 1000, ir.NumDocs);
+            ir.Dispose();
+        }
+
+        [Test]
+        public void TestHighlightingNoTvNoStore()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "doc.stored=false",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "query.maker=" + 
typeof(ReutersQueryMaker).AssemblyQualifiedName,
+                "ResetSystemErase",
+                "CreateIndex",
+                "{ AddDoc } : 1000",
+                "ForceMerge(1)",
+                "CloseIndex",
+                "OpenReader",
+                "{ 
CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body])
 } : 200",
+                "CloseReader",
+            };
+
+            // 2. we test this value later
+            CountingHighlighterTestTask.numHighlightedResults = 0;
+            CountingHighlighterTestTask.numDocsRetrieved = 0;
+            // 3. execute the algorithm  (required in every "logic" test)
+            try
+            {
+                Benchmark benchmark = execBenchmark(algLines);
+                assertTrue("CountingHighlighterTest should have thrown an 
exception", false);
+                assertNotNull(benchmark); // (avoid compile warning on unused variable)
+            }
+#pragma warning disable 168
+            catch (Exception e)
+#pragma warning restore 168
+            {
+                assertTrue(true);
+            }
+        }
+
+        /**
+         * Test Exhausting Doc Maker logic
+         */
+        [Test]
+        public void TestExhaustContentSource()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.SingleDocSource, Lucene.Net.Benchmark",
+                "content.source.log.step=1",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "# ----- alg ",
+                "CreateIndex",
+                "{ AddDoc } : * ",
+                "ForceMerge(1)",
+                "CloseIndex",
+                "OpenReader",
+                "{ CountingSearchTest } : 100",
+                "CloseReader",
+                "[ CountingSearchTest > : 30",
+                "[ CountingSearchTest > : 9",
+            };
+
+            // 2. we test this value later
+            CountingSearchTestTask.numSearches = 0;
+
+            // 3. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 4. test specific checks after the benchmark run completed.
+            assertEquals("TestSearchTask was supposed to be called!", 139, 
CountingSearchTestTask.numSearches);
+            assertTrue("Index does not exist?...!", 
DirectoryReader.IndexExists(benchmark.RunData.Directory));
+            // now we should be able to open the index for write. 
+            IndexWriter iw = new IndexWriter(benchmark.RunData.Directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+            iw.Dispose();
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            assertEquals("1 docs were added to the index, this is what we 
expect to find!", 1, ir.NumDocs);
+            ir.Dispose();
+        }
+
+        // LUCENE-1994: test thread safety of SortableSingleDocMaker
+        [Test]
+        public void TestDocMakerThreadSafety()
+        {
+            // 1. alg definition (required in every "logic" test)
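+            // 4 parallel threads each adding 250 docs => 1000 docs total,
+            // which the maxDoc assertion below relies on.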
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.SortableSingleDocSource, Lucene.Net.Benchmark",
+                "doc.term.vector=false",
+                "log.step.AddDoc=10000",
+                "content.source.forever=true",
+                "directory=RAMDirectory",
+                "doc.reuse.fields=false",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "doc.index.props=true",
+                "# ----- alg ",
+                "CreateIndex",
+                "[ { AddDoc > : 250 ] : 4",
+                "CloseIndex",
+            };
+
+            // 2. we test this value later
+            CountingSearchTestTask.numSearches = 0;
+
+            // 3. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            DirectoryReader r = DirectoryReader.Open(benchmark.RunData.Directory);
+            SortedDocValues idx = FieldCache.DEFAULT.GetTermsIndex(SlowCompositeReaderWrapper.Wrap(r), "country");
+            int maxDoc = r.MaxDoc;
+            assertEquals(1000, maxDoc);
+            for (int i = 0; i < 1000; i++)
+            {
+                assertTrue("doc " + i + " has null country", idx.GetOrd(i) != 
-1);
+            }
+            r.Dispose();
+        }
+
+        /**
+         * Test Parallel Doc Maker logic (for LUCENE-940)
+         */
+        [Test]
+        public void TestParallelDocMaker()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=FSDirectory",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "# ----- alg ",
+                "CreateIndex",
+                "[ { AddDoc } : * ] : 4 ",
+                "CloseIndex",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 3. test number of docs in the index
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            int ndocsExpected = 20; // first 20 reuters docs.
+            assertEquals("wrong number of docs in the index!", ndocsExpected, 
ir.NumDocs);
+            ir.Dispose();
+        }
+
+        /**
+         * Test WriteLineDoc and LineDocSource.
+         */
+        [Test]
+        public void TestLineDocFile()
+        {
+            FileInfo lineFile = CreateTempFile("test.reuters.lines", ".txt");
+
+            // We will call WriteLineDocs this many times
+            int NUM_TRY_DOCS = 50;
+
+            // Creates a line file with first 50 docs from SingleDocSource
+            String[] algLines1 = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.SingleDocSource, Lucene.Net.Benchmark",
+                "content.source.forever=true",
+                "line.file.out=" + lineFile.FullName.Replace('\\', '/'),
+                "# ----- alg ",
+                "{WriteLineDoc()}:" + NUM_TRY_DOCS,
+            };
+
+            // Run algo
+            Benchmark benchmark = execBenchmark(algLines1);
+
+            TextReader r =
+                new StreamReader(
+                    new FileStream(lineFile.FullName, FileMode.Open, FileAccess.Read), Encoding.UTF8);
+            int numLines = 0;
+            String line;
+            while ((line = r.ReadLine()) != null)
+            {
+                if (numLines == 0 && line.StartsWith(WriteLineDocTask.FIELDS_HEADER_INDICATOR, StringComparison.Ordinal))
+                {
+                    continue; // do not count the header line as a doc 
+                }
+                numLines++;
+            }
+            r.Dispose();
+            assertEquals("did not see the right number of docs; should be " + 
NUM_TRY_DOCS + " but was " + numLines, NUM_TRY_DOCS, numLines);
+
+            // Index the line docs
+            String[] algLines2 = {
+                "# ----- properties ",
+                "analyzer=Lucene.Net.Analysis.Core.WhitespaceAnalyzer, 
Lucene.Net.Analysis.Common",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + lineFile.FullName.Replace('\\', '/'),
+                "content.source.forever=false",
+                "doc.reuse.fields=false",
+                "ram.flush.mb=4",
+                "# ----- alg ",
+                "ResetSystemErase",
+                "CreateIndex",
+                "{AddDoc}: *",
+                "CloseIndex",
+            };
+
+            // Run algo
+            benchmark = execBenchmark(algLines2);
+
+            // now we should be able to open the index for write. 
+            IndexWriter iw = new IndexWriter(benchmark.RunData.Directory,
+                new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+                    .SetOpenMode(OpenMode.APPEND));
+            iw.Dispose();
+
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            assertEquals(numLines + " lines were created but " + ir.NumDocs + " docs are in the index", numLines, ir.NumDocs);
+            ir.Dispose();
+
+            lineFile.Delete();
+        }
+
+        /**
+         * Test ReadTokensTask
+         */
+        [Test]
+        public void TestReadTokens()
+        {
+
+            // We will call ReadTokens on this many docs
+            int NUM_DOCS = 20;
+
+            // Read tokens from first NUM_DOCS docs from Reuters and
+            // then build index from the same docs
+            String[] algLines1 = {
+                "# ----- properties ",
+                "analyzer=Lucene.Net.Analysis.Core.WhitespaceAnalyzer, 
Lucene.Net.Analysis.Common",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "# ----- alg ",
+                "{ReadTokens}: " + NUM_DOCS,
+                "ResetSystemErase",
+                "CreateIndex",
+                "{AddDoc}: " + NUM_DOCS,
+                "CloseIndex",
+            };
+
+            // Run algo
+            Benchmark benchmark = execBenchmark(algLines1);
+
+            IList<TaskStats> stats = benchmark.RunData.Points.TaskStats;
+
+            // Count how many tokens all ReadTokens saw
+            int totalTokenCount1 = 0;
+            foreach (TaskStats stat in stats)
+            {
+                if (stat.Task.GetName().Equals("ReadTokens"))
+                {
+                    totalTokenCount1 += stat.Count;
+                }
+            }
+
+            // Separately count how many tokens are actually in the index:
+            IndexReader reader = DirectoryReader.Open(benchmark.RunData.Directory);
+            assertEquals(NUM_DOCS, reader.NumDocs);
+
+            int totalTokenCount2 = 0;
+
+            Fields fields = MultiFields.GetFields(reader);
+
+            foreach (String fieldName in fields)
+            {
+                if (fieldName.Equals(DocMaker.ID_FIELD) || fieldName.Equals(DocMaker.DATE_MSEC_FIELD) || fieldName.Equals(DocMaker.TIME_SEC_FIELD))
+                {
+                    continue;
+                }
+                Terms terms = fields.GetTerms(fieldName);
+                if (terms == null)
+                {
+                    continue;
+                }
+                TermsEnum termsEnum = terms.GetIterator(null);
+                DocsEnum docs = null;
+                while (termsEnum.Next() != null)
+                {
+                    docs = TestUtil.Docs(Random(), termsEnum, MultiFields.GetLiveDocs(reader), docs, DocsFlags.FREQS);
+                    while (docs.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
+                    {
+                        totalTokenCount2 += docs.Freq;
+                    }
+                }
+            }
+            reader.Dispose();
+
+            // Make sure they are the same
+            assertEquals(totalTokenCount1, totalTokenCount2);
+        }
+
+        /**
+         * Test that " {[AddDoc(4000)]: 4} : * " works corrcetly (for 
LUCENE-941)
+         */
+        [Test]
+        public void TestParallelExhausted()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "task.max.depth.log=1",
+                "# ----- alg ",
+                "CreateIndex",
+                "{ [ AddDoc]: 4} : * ",
+                "ResetInputs ",
+                "{ [ AddDoc]: 4} : * ",
+                "WaitForMerges",
+                "CloseIndex",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 3. test number of docs in the index
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            int ndocsExpected = 2 * 20; // first 20 reuters docs.
+            assertEquals("wrong number of docs in the index!", ndocsExpected, 
ir.NumDocs);
+            ir.Dispose();
+        }
+
+
+        /**
+         * Test that exhaust in loop works as expected (LUCENE-1115).
+         */
+        [Test]
+        public void TestExhaustedLooped()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "task.max.depth.log=1",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "  WaitForMerges",
+                "  CloseIndex",
+                "} : 2",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 3. test number of docs in the index
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            int ndocsExpected = 20;  // first 20 reuters docs.
+            assertEquals("wrong number of docs in the index!", ndocsExpected, 
ir.NumDocs);
+            ir.Dispose();
+        }
+
+        /**
+         * Test that we can close IndexWriter with argument "false".
+         */
+        [Test]
+        public void TestCloseIndexFalse()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "ram.flush.mb=-1",
+                "max.buffered=2",
+                "content.source.log.step=3",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "debug.level=1",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "  CloseIndex(false)",
+                "} : 2",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 3. test number of docs in the index
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            int ndocsExpected = 20; // first 20 reuters docs.
+            assertEquals("wrong number of docs in the index!", ndocsExpected, 
ir.NumDocs);
+            ir.Dispose();
+        }
+
+        public class MyMergeScheduler : SerialMergeScheduler
+        {
+            internal bool called;
+            public MyMergeScheduler()
+                : base()
+            {
+                called = true;
+            }
+        }
+
+        /**
+         * Test that we can set the merge scheduler.
+         */
+        [Test]
+        public void TestMergeScheduler()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "merge.scheduler=" + 
typeof(MyMergeScheduler).AssemblyQualifiedName,
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "debug.level=1",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "} : 2",
+            };
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            assertTrue("did not use the specified MergeScheduler",
+                ((MyMergeScheduler)benchmark.RunData.IndexWriter.Config.MergeScheduler).called);
+            benchmark.RunData.IndexWriter.Dispose();
+
+            // 3. test number of docs in the index
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            int ndocsExpected = 20; // first 20 reuters docs.
+            assertEquals("wrong number of docs in the index!", ndocsExpected, 
ir.NumDocs);
+            ir.Dispose();
+        }
+
+        public class MyMergePolicy : LogDocMergePolicy
+        {
+            internal bool called;
+            public MyMergePolicy()
+            {
+                called = true;
+            }
+        }
+
+        /**
+         * Test that we can set the merge policy.
+         */
+        [Test]
+        public void TestMergePolicy()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "ram.flush.mb=-1",
+                "max.buffered=2",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "merge.policy=" + typeof(MyMergePolicy).AssemblyQualifiedName,
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "debug.level=1",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "} : 2",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+            assertTrue("did not use the specified MergePolicy", 
((MyMergePolicy)benchmark.RunData.IndexWriter.Config.MergePolicy).called);
+            benchmark.RunData.IndexWriter.Dispose();
+
+            // 3. test number of docs in the index
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            int ndocsExpected = 20; // first 20 reuters docs.
+            assertEquals("wrong number of docs in the index!", ndocsExpected, 
ir.NumDocs);
+            ir.Dispose();
+        }
+
+        /**
+         * Test that IndexWriter settings stick.
+         */
+        [Test]
+        public void TestIndexWriterSettings()
+        {
+            // 1. alg definition (required in every "logic" test)
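+            // Property values of the form "name:v1:v2" cycle per round (see
+            // NewRound): e.g. "compound=cmpnd:true:false" uses true in round 0
+            // and false in round 1, reported under the short label "cmpnd".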
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "ram.flush.mb=-1",
+                "max.buffered=2",
+                "compound=cmpnd:true:false",
+                "doc.term.vector=vector:false:true",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "doc.stored=false",
+                "merge.factor=3",
+                "doc.tokenized=false",
+                "debug.level=1",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "  NewRound",
+                "} : 2",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+            IndexWriter writer = benchmark.RunData.IndexWriter;
+            assertEquals(2, writer.Config.MaxBufferedDocs);
+            assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, (int)writer.Config.RAMBufferSizeMB);
+            assertEquals(3, ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor);
+            assertEquals(0.0d, writer.Config.MergePolicy.NoCFSRatio, 0.0);
+            writer.Dispose();
+            Store.Directory dir = benchmark.RunData.Directory;
+            IndexReader reader = DirectoryReader.Open(dir);
+            Fields tfv = reader.GetTermVectors(0);
+            assertNotNull(tfv);
+            assertTrue(tfv.Count > 0);
+            reader.Dispose();
+        }
+
+        /**
+         * Test indexing with facets tasks.
+         */
+        [Test]
+        public void TestIndexingWithFacets()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=100",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "doc.stored=false",
+                "merge.factor=3",
+                "doc.tokenized=false",
+                "debug.level=1",
+                "# ----- alg ",
+                "ResetSystemErase",
+                "CreateIndex",
+                "CreateTaxonomyIndex",
+                "{ \"AddDocs\"  AddFacetedDoc > : * ",
+                "CloseIndex",
+                "CloseTaxonomyIndex",
+                "OpenTaxonomyReader",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+            PerfRunData runData = benchmark.RunData;
+            assertNull("taxo writer was not properly closed", 
runData.TaxonomyWriter);
+            TaxonomyReader taxoReader = runData.GetTaxonomyReader();
+            assertNotNull("taxo reader was not opened", taxoReader);
+            assertTrue("nothing was added to the taxnomy (expecting root and 
at least one addtional category)", taxoReader.Count > 1);
+            taxoReader.Dispose();
+        }
+
+        /**
+         * Test that we can call forceMerge(maxNumSegments).
+         */
+        [Test]
+        public void TestForceMerge()
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "ram.flush.mb=-1",
+                "max.buffered=3",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "merge.policy=Lucene.Net.Index.LogDocMergePolicy, Lucene.Net",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "debug.level=1",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "  ForceMerge(3)",
+                "  CloseIndex()",
+                "} : 2",
+            };
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 3. test number of docs in the index
+            IndexReader ir = DirectoryReader.Open(benchmark.RunData.Directory);
+            int ndocsExpected = 20; // first 20 reuters docs.
+            assertEquals("wrong number of docs in the index!", ndocsExpected, 
ir.NumDocs);
+            ir.Dispose();
+
+            // Make sure we have 3 segments:
+            SegmentInfos infos = new SegmentInfos();
+            infos.Read(benchmark.RunData.Directory);
+            assertEquals(3, infos.Count);
+        }
+
+        /**
+         * Test disabling task count (LUCENE-1136).
+         */
+        [Test]
+        public void TestDisableCounting()
+        {
+            doTestDisableCounting(true);
+            doTestDisableCounting(false);
+        }
+
+        private void doTestDisableCounting(bool disable)
+        {
+            // 1. alg definition (required in every "logic" test)
+            String[] algLines = disableCountingLines(disable);
+
+            // 2. execute the algorithm  (required in every "logic" test)
+            Benchmark benchmark = execBenchmark(algLines);
+
+            // 3. test counters
+            int n = disable ? 0 : 1;
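+            // With counting enabled (n == 1) the "Rounds" sequence counts its
+            // 20 AddDoc executions plus one CreateIndex and one CloseIndex,
+            // hence the 20 + 2 * n expected below.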
+            int nChecked = 0;
+            foreach (TaskStats stats in benchmark.RunData.Points.TaskStats)
+            {
+                String taskName = stats.Task.GetName();
+                if (taskName.equals("Rounds"))
+                {
+                    assertEquals("Wrong total count!", 20 + 2 * n, 
stats.Count);
+                    nChecked++;
+                }
+                else if (taskName.equals("CreateIndex"))
+                {
+                    assertEquals("Wrong count for CreateIndex!", n, 
stats.Count);
+                    nChecked++;
+                }
+                else if (taskName.equals("CloseIndex"))
+                {
+                    assertEquals("Wrong count for CloseIndex!", n, 
stats.Count);
+                    nChecked++;
+                }
+            }
+            assertEquals("Missing some tasks to check!", 3, nChecked);
+        }
+
+        private String[] disableCountingLines(bool disable)
+        {
+            String dis = disable ? "-" : "";
+            return new String[] {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=30",
+                "doc.term.vector=false",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "doc.stored=false",
+                "doc.tokenized=false",
+                "task.max.depth.log=1",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  "+dis+"CreateIndex",            // optionally disable 
counting here
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "  "+dis+"  CloseIndex",             // optionally disable 
counting here (with extra blanks)
+                "}",
+                "RepSumByName",
+            };
+        }
+
+        /**
+         * Test that we can change the Locale in the runData,
+         * that it is parsed as we expect.
+         */
+        [Test]
+        public void TestLocale()
+        {
+            // empty Locale: clear it (null)
+            Benchmark benchmark = execBenchmark(getLocaleConfig(""));
+            assertNull(benchmark.RunData.Locale);
+
+            // ROOT locale
+            benchmark = execBenchmark(getLocaleConfig("ROOT"));
+            assertEquals(CultureInfo.InvariantCulture, benchmark.RunData.Locale);
+
+            // specify just a language 
+            benchmark = execBenchmark(getLocaleConfig("de"));
+            assertEquals(new CultureInfo("de"), benchmark.RunData.Locale);
+
+            // specify language + country
+            benchmark = execBenchmark(getLocaleConfig("en,US"));
+            assertEquals(new CultureInfo("en-US"), benchmark.RunData.Locale);
+
+            // specify language + country + variant
+            benchmark = execBenchmark(getLocaleConfig("no,NO,NY"));
+            assertEquals(new CultureInfo("no-NO"/*, "NY"*/), 
benchmark.RunData.Locale);
+        }
+
+        private String[] getLocaleConfig(String localeParam)
+        {
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  NewLocale(" + localeParam + ")",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "  NewRound",
+                "} : 1",
+            };
+            return algLines;
+        }
+
+        /**
+         * Test that we can create CollationAnalyzers.
+         */
+        [Test]
+        public void TestCollator()
+        {
+            // LUCENENET specific - we don't have a JDK version of collator
+            // so we are using ICU
+            var collatorParam = "impl:icu";
+
+            // ROOT locale
+            Benchmark benchmark = execBenchmark(getCollatorConfig("ROOT", collatorParam));
+            ICUCollationKeyAnalyzer expected = new ICUCollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator
+                .Create(CultureInfo.InvariantCulture));
+            assertEqualCollation(expected, benchmark.RunData.Analyzer, "foobar");
+
+            // specify just a language
+            benchmark = execBenchmark(getCollatorConfig("de", collatorParam));
+            expected = new ICUCollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator.Create(new CultureInfo("de")));
+            assertEqualCollation(expected, benchmark.RunData.Analyzer, "foobar");
+
+            // specify language + country
+            benchmark = execBenchmark(getCollatorConfig("en,US", 
collatorParam));
+            expected = new ICUCollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator.Create(new CultureInfo("en-US"), Collator.Fallback.FallbackAllowed));
+            assertEqualCollation(expected, benchmark.RunData.Analyzer, "foobar");
+
+            // specify language + country + variant
+            benchmark = execBenchmark(getCollatorConfig("no,NO,NY", 
collatorParam));
+            expected = new ICUCollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator.Create(new CultureInfo("no-NO"/*, "NY"*/), Collator.Fallback.FallbackAllowed));
+            assertEqualCollation(expected, benchmark.RunData.Analyzer, "foobar");
+        }
+
+        private void assertEqualCollation(Analyzer a1, Analyzer a2, String text)
+        {
+            TokenStream ts1 = a1.GetTokenStream("bogus", text);
+            TokenStream ts2 = a2.GetTokenStream("bogus", text);
+            ts1.Reset();
+            ts2.Reset();
+            ITermToBytesRefAttribute termAtt1 = ts1.AddAttribute<ITermToBytesRefAttribute>();
+            ITermToBytesRefAttribute termAtt2 = ts2.AddAttribute<ITermToBytesRefAttribute>();
+            assertTrue(ts1.IncrementToken());
+            assertTrue(ts2.IncrementToken());
+            BytesRef bytes1 = termAtt1.BytesRef;
+            BytesRef bytes2 = termAtt2.BytesRef;
+            termAtt1.FillBytesRef();
+            termAtt2.FillBytesRef();
+            assertEquals(bytes1, bytes2);
+            assertFalse(ts1.IncrementToken());
+            assertFalse(ts2.IncrementToken());
+            ts1.Dispose();
+            ts2.Dispose();
+        }
+
+        private String[] getCollatorConfig(String localeParam,
+            String collationParam)
+        {
+            String[] algLines = {
+                "# ----- properties ",
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "content.source.log.step=3",
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "# ----- alg ",
+                "{ \"Rounds\"",
+                "  ResetSystemErase",
+                "  NewLocale(" + localeParam + ")",
+                "  NewCollationAnalyzer(" + collationParam + ")",
+                "  CreateIndex",
+                "  { \"AddDocs\"  AddDoc > : * ",
+                "  NewRound",
+                "} : 1",
+            };
+            return algLines;
+        }
+
+        /**
+         * Test that we can create shingle analyzers using AnalyzerFactory.
+         */
+        [Test]
+        public void TestShingleAnalyzer()
+        {
+            String text = "one,two,three, four five six";
+
+            // StandardTokenizer, maxShingleSize, and outputUnigrams
+            Benchmark benchmark = execBenchmark(getAnalyzerFactoryConfig
+                ("shingle-analyzer", "StandardTokenizer,ShingleFilter"));
+            benchmark.RunData.Analyzer.GetTokenStream
+                ("bogus", text).Dispose();
+            BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
+                                                     new String[] { "one", "one two", "two", "two three",
+                                                            "three", "three four", "four", "four five",
+                                                            "five", "five six", "six" });
+            // StandardTokenizer, maxShingleSize = 3, and outputUnigrams = false
+            benchmark = execBenchmark
+              (getAnalyzerFactoryConfig
+                  ("shingle-analyzer",
+                   "StandardTokenizer,ShingleFilter(maxShingleSize:3,outputUnigrams:false)"));
+            BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
+                                                     new String[] { "one two", "one two three", "two three",
+                                                            "two three four", "three four",
+                                                            "three four five", "four five",
+                                                            "four five six", "five six" });
+            // WhitespaceTokenizer, default maxShingleSize and outputUnigrams
+            benchmark = execBenchmark
+              (getAnalyzerFactoryConfig("shingle-analyzer", 
"WhitespaceTokenizer,ShingleFilter"));
+            BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
+                                                     new String[] { "one,two,three,", "one,two,three, four",
+                                                            "four", "four five", "five", "five six",
+                                                            "six" });
+
+            // WhitespaceTokenizer, maxShingleSize=3 and outputUnigrams=false
+            benchmark = execBenchmark
+              (getAnalyzerFactoryConfig
+                ("shingle-factory",
+                 "WhitespaceTokenizer,ShingleFilter(outputUnigrams:false,maxShingleSize:3)"));
+            BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
+                                                     new String[] { "one,two,three, four",
+                                                            "one,two,three, four five",
+                                                            "four five", "four five six",
+                                                            "five six" });
+        }
+
+        private String[] getAnalyzerFactoryConfig(String name, String @params)
+        {
+            //String singleQuoteEscapedName = name.Replace("'", "\\\\'");
+            //String[] algLines = {
+            //    "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+            //    "docs.file=" + getReuters20LinesFile(),
+            //    "work.dir=" + getWorkDir().FullName.Replace(@"\\\\", "/"), 
// Fix Windows path
+            //    "content.source.forever=false",
+            //    "directory=RAMDirectory",
+            //    "AnalyzerFactory(name:'" + singleQuoteEscapedName + "', " + 
@params + ")",
+            //    "NewAnalyzer('" + singleQuoteEscapedName + "')",
+            //    "CreateIndex",
+            //    "{ \"AddDocs\"  AddDoc > : * "
+            //};
+            //String singleQuoteEscapedName = name.Replace("'", @"\'");
+            String singleQuoteEscapedName = name.Replace("'", @"\'");
+            String[] algLines = {
+                "content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
+                "docs.file=" + getReuters20LinesFile(),
+                "work.dir=" + getWorkDir().FullName.Replace(@"\", "/"), // Fix 
Windows path
+                "content.source.forever=false",
+                "directory=RAMDirectory",
+                "AnalyzerFactory(name:'" + singleQuoteEscapedName + "', " + 
@params + ")",
+                "NewAnalyzer('" + singleQuoteEscapedName + "')",
+                "CreateIndex",
+                "{ \"AddDocs\"  AddDoc > : * "
+            };
+            return algLines;
+        }
+
+        [Test]
+        public void TestAnalyzerFactory()
+        {
+            String text = "Fortieth, Quarantième, Cuadragésimo";
+            Benchmark benchmark = execBenchmark(getAnalyzerFactoryConfig
+                ("ascii folded, pattern replaced, standard tokenized, 
downcased, bigrammed.'analyzer'",
+                 "positionIncrementGap:100,offsetGap:1111,"
+                 + "MappingCharFilter(mapping:'test-mapping-ISOLatin1Accent-partial.txt'),"
+                 + "PatternReplaceCharFilterFactory(pattern:'e(\\\\\\\\S*)m',replacement:\"$1xxx$1\"),"
+                 + "StandardTokenizer,LowerCaseFilter,NGramTokenFilter(minGramSize:2,maxGramSize:2)"));
+            BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
+                new String[] { "fo", "or", "rt", "ti", "ie", "et", "th",
+                       "qu", "ua", "ar", "ra", "an", "nt", "ti", "ix", "xx", 
"xx", "xe",
+                       "cu", "ua", "ad", "dr", "ra", "ag", "gs", "si", "ix", 
"xx", "xx", "xs", "si", "io"});
+        }
+
+        private String getReuters20LinesFile()
+        {
+            return getWorkDirResourcePath("reuters.first20.lines.txt");
+        }
+    }
+}
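
Every test above routes its alg lines through BenchmarkTestCase.execBenchmark,
which is not part of this diff. A minimal sketch of that helper, assuming (as
in the upstream Java base class) that it simply joins the lines and executes
them through the Benchmark class:

    // Hypothetical sketch; the real helper lives in BenchmarkTestCase.
    public Benchmark execBenchmark(string[] algLines)
    {
        // One property or task per array entry; Benchmark parses the whole text.
        string algText = string.Join(Environment.NewLine, algLines);
        Benchmark benchmark = new Benchmark(new StringReader(algText));
        benchmark.Execute();
        return benchmark;
    }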

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b515271d/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksParse.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksParse.cs 
b/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksParse.cs
new file mode 100644
index 0000000..e604cef
--- /dev/null
+++ b/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksParse.cs
@@ -0,0 +1,178 @@
+using Lucene.Net.Benchmarks.ByTask.Feeds;
+using Lucene.Net.Benchmarks.ByTask.Tasks;
+using Lucene.Net.Benchmarks.ByTask.Utils;
+using Lucene.Net.Search;
+using Lucene.Net.Store;
+using Lucene.Net.Util;
+using NUnit.Framework;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+
+namespace Lucene.Net.Benchmarks.ByTask
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    /// <summary>
+    /// Test very simply that perf tasks are parsed as expected.
+    /// </summary>
+    public class TestPerfTasksParse : LuceneTestCase
+    {
+        static readonly String NEW_LINE = Environment.NewLine;
+        static readonly String INDENT = "  ";
+
+        // properties in effect in all tests here
+        static readonly String propPart =
+          INDENT + "directory=RAMDirectory" + NEW_LINE +
+          INDENT + "print.props=false" + NEW_LINE
+        ;
+
+        /** Test the repetition parsing for parallel tasks */
+        [Test]
+        public void TestParseParallelTaskSequenceRepetition()
+        {
+            String taskStr = "AddDoc";
+            String parsedTasks = "[ " + taskStr + " ] : 1000";
+            Benchmark benchmark = new Benchmark(new StringReader(propPart + parsedTasks));
+            Algorithm alg = benchmark.Algorithm;
+            IList<PerfTask> algTasks = alg.ExtractTasks();
+            bool foundAdd = false;
+            foreach (PerfTask task in algTasks)
+            {
+                if (task.toString().IndexOf(taskStr) >= 0)
+                {
+                    foundAdd = true;
+                }
+                if (task is TaskSequence)
+                {
+                    assertEquals("repetions should be 1000 for " + 
parsedTasks, 1000, ((TaskSequence)task).Repetitions);
+                    assertTrue("sequence for " + parsedTasks + " should be 
parallel!", ((TaskSequence)task).IsParallel);
+                }
+                assertTrue("Task " + taskStr + " was not found in " + 
alg.toString(), foundAdd);
+            }
+        }
+
+        /** Test the repetition parsing for sequential tasks */
+        [Test]
+        public void TestParseTaskSequenceRepetition()
+        {
+            String taskStr = "AddDoc";
+            String parsedTasks = "{ " + taskStr + " } : 1000";
+            Benchmark benchmark = new Benchmark(new StringReader(propPart + parsedTasks));
+            Algorithm alg = benchmark.Algorithm;
+            IList<PerfTask> algTasks = alg.ExtractTasks();
+            bool foundAdd = false;
+            foreach (PerfTask task in algTasks)
+            {
+                if (task.toString().IndexOf(taskStr) >= 0)
+                {
+                    foundAdd = true;
+                }
+                if (task is TaskSequence)
+                {
+                    assertEquals("repetions should be 1000 for " + 
parsedTasks, 1000, ((TaskSequence)task).Repetitions);
+                    assertFalse("sequence for " + parsedTasks + " should be 
sequential!", ((TaskSequence)task).IsParallel);
+                }
+                assertTrue("Task " + taskStr + " was not found in " + 
alg.toString(), foundAdd);
+            }
+        }
+
+        public class MockContentSource : ContentSource
+        {
+            public override DocData GetNextDocData(DocData docData)
+            {
+                return docData;
+            }
+
+            protected override void Dispose(bool disposing) { }
+        }
+
+        public class MockQueryMaker : AbstractQueryMaker
+        {
+            protected override Query[] PrepareQueries()
+            {
+                return new Query[0];
+            }
+        }
+
+        /// <summary>Test the parsing of example scripts</summary>
+        [Test]
+        public void TestParseExamples()
+        {
+            // LUCENENET specific
+            // Rather than relying on a file path somewhere, we store the
+            // files zipped in an embedded resource and unzip them to a
+            // known temp directory for the test.
+            DirectoryInfo examplesDir = CreateTempDir("test-parse-examples");
+            using (var stream = GetType().getResourceAsStream("conf.zip"))
+            {
+                TestUtil.Unzip(stream, examplesDir);
+            }
+
+            // track that at least one example file was found and parsed
+            bool foundFiles = false;
+
+            foreach (FileInfo algFile in examplesDir.EnumerateFiles("*.alg"))
+            {
+                try
+                {
+                    Config config = new Config(new StreamReader(new FileStream(algFile.FullName, FileMode.Open, FileAccess.Read), Encoding.UTF8));
+                    String contentSource = config.Get("content.source", null);
+                    if (contentSource != null)
+                    {
+                        if (Type.GetType(contentSource) == null)
+                            throw new TypeLoadException(contentSource);
+                    }
+                    config.Set("work.dir", 
CreateTempDir(LuceneTestCase.TestClass.Name).FullName);
+                    config.Set("content.source", 
typeof(MockContentSource).AssemblyQualifiedName);
+                    String mockSource = config.Get("content.source", null);
+                    if (mockSource != null)
+                    {
+                        if (Type.GetType(mockSource) == null)
+                            throw new TypeLoadException(mockSource);
+                    }
+                    config.Set("directory", 
typeof(RAMDirectory).AssemblyQualifiedName);
+                    if (config.Get("line.file.out", null) != null)
+                    {
+                        config.Set("line.file.out", CreateTempFile("linefile", 
".txt").FullName);
+                    }
+                    string queryMaker = config.Get("query.maker", null);
+                    if (queryMaker != null)
+                    {
+                        if (Type.GetType(queryMaker) == null)
+                            throw new TypeLoadException(queryMaker);
+
+                        config.Set("query.maker", 
typeof(MockQueryMaker).AssemblyQualifiedName);
+                    }
+                    PerfRunData data = new PerfRunData(config);
+                    new Algorithm(data);
+                }
+                catch (Exception t)
+                {
+                    throw new Exception("Could not parse sample file: " + algFile, t);
+                }
+                foundFiles = true;
+            }
+            if (!foundFiles)
+            {
+                fail("could not find any .alg files!");
+            }
+        }
+    }
+}
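
The "{ ... } : N" syntax exercised above repeats the enclosed tasks N times
sequentially, while "[ ... ] : N" runs them in parallel. A minimal sketch of
driving the parser directly, built only from the Benchmark, Algorithm,
PerfTask and TaskSequence members these tests use (the harness class itself
is illustrative, not part of this commit):

    using System;
    using System.IO;
    using Lucene.Net.Benchmarks.ByTask;
    using Lucene.Net.Benchmarks.ByTask.Tasks;

    public static class AlgorithmParseSketch
    {
        public static void Main()
        {
            // "{ AddDoc } : 10" would parse as a sequential sequence instead.
            string alg =
                "  directory=RAMDirectory" + Environment.NewLine +
                "  print.props=false" + Environment.NewLine +
                "[ AddDoc ] : 10";

            Benchmark benchmark = new Benchmark(new StringReader(alg));
            foreach (PerfTask task in benchmark.Algorithm.ExtractTasks())
            {
                TaskSequence seq = task as TaskSequence;
                if (seq != null)
                {
                    // expected: Repetitions == 10, IsParallel == true
                    Console.WriteLine(seq + ": reps=" + seq.Repetitions + ", parallel=" + seq.IsParallel);
                }
            }
        }
    }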

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b515271d/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/StreamUtilsTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/StreamUtilsTest.cs 
b/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/StreamUtilsTest.cs
new file mode 100644
index 0000000..eb2aaec
--- /dev/null
+++ b/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/StreamUtilsTest.cs
@@ -0,0 +1,149 @@
+using ICSharpCode.SharpZipLib.BZip2;
+using Lucene.Net.Util;
+using NUnit.Framework;
+using System;
+using System.IO;
+using System.IO.Compression;
+using System.Text;
+
+namespace Lucene.Net.Benchmarks.ByTask.Utils
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    public class StreamUtilsTest : BenchmarkTestCase
+    {
+        private static readonly String TEXT = "Some-Text...";
+        private DirectoryInfo testDir;
+
+        [Test]
+        public void TestGetInputStreamPlainText()
+        {
+            assertReadText(rawTextFile("txt"));
+            assertReadText(rawTextFile("TXT"));
+        }
+
+        [Test]
+        public void TestGetInputStreamGzip()
+        {
+            assertReadText(rawGzipFile("gz"));
+            assertReadText(rawGzipFile("gzip"));
+            assertReadText(rawGzipFile("GZ"));
+            assertReadText(rawGzipFile("GZIP"));
+        }
+
+        [Test]
+        public void TestGetInputStreamBzip2()
+        {
+            assertReadText(rawBzip2File("bz2"));
+            assertReadText(rawBzip2File("bzip"));
+            assertReadText(rawBzip2File("BZ2"));
+            assertReadText(rawBzip2File("BZIP"));
+        }
+
+        [Test]
+        public void TestGetOutputStreamBzip2()
+        {
+            assertReadText(autoOutFile("bz2"));
+            assertReadText(autoOutFile("bzip"));
+            assertReadText(autoOutFile("BZ2"));
+            assertReadText(autoOutFile("BZIP"));
+        }
+
+        [Test]
+        public void TestGetOutputStreamGzip()
+        {
+            assertReadText(autoOutFile("gz"));
+            assertReadText(autoOutFile("gzip"));
+            assertReadText(autoOutFile("GZ"));
+            assertReadText(autoOutFile("GZIP"));
+        }
+
+        [Test]
+        public void TestGetOutputStreamPlain()
+        {
+            assertReadText(autoOutFile("txt"));
+            assertReadText(autoOutFile("text"));
+            assertReadText(autoOutFile("TXT"));
+            assertReadText(autoOutFile("TEXT"));
+        }
+
+        private FileInfo rawTextFile(String ext)
+        {
+            FileInfo f = new FileInfo(Path.Combine(testDir.FullName, "testfile." + ext));
+            using (TextWriter w = new StreamWriter(new FileStream(f.FullName, FileMode.Create, FileAccess.Write), Encoding.UTF8))
+                w.WriteLine(TEXT);
+            return f;
+        }
+
+        private FileInfo rawGzipFile(String ext)
+        {
+            FileInfo f = new FileInfo(Path.Combine(testDir.FullName, "testfile." + ext));
+            // .NET equivalent of Java's CompressorStreamFactory GZIP output stream
+            using (Stream os = new GZipStream(new FileStream(f.FullName, FileMode.Create, FileAccess.Write), CompressionMode.Compress))
+                writeText(os);
+            return f;
+        }
+
+        private FileInfo rawBzip2File(String ext)
+        {
+            FileInfo f = new FileInfo(Path.Combine(testDir.FullName, "testfile." + ext));
+            // .NET equivalent of Java's CompressorStreamFactory BZIP2 output stream
+            using (Stream os = new BZip2OutputStream(new FileStream(f.FullName, FileMode.Create, FileAccess.Write)))
+                writeText(os);
+            return f;
+        }
+
+        private FileInfo autoOutFile(String ext)
+        {
+            FileInfo f = new FileInfo(Path.Combine(testDir.FullName, "testfile." + ext));
+            Stream os = StreamUtils.GetOutputStream(f);
+            writeText(os);
+            return f;
+        }
+
+        private void writeText(Stream os)
+        {
+            // disposing the writer flushes it and closes the underlying stream
+            using (TextWriter w = new StreamWriter(os, Encoding.UTF8))
+                w.WriteLine(TEXT);
+        }
+
+        private void assertReadText(FileInfo f)
+        {
+            // StreamUtils picks the decompressor from the file extension
+            using (TextReader r = new StreamReader(StreamUtils.GetInputStream(f), Encoding.UTF8))
+            {
+                String line = r.ReadLine();
+                assertEquals("Wrong text found in " + f.Name, TEXT, line);
+            }
+        }
+
+        public override void SetUp()
+        {
+            base.SetUp();
+            testDir = new DirectoryInfo(Path.Combine(getWorkDir().FullName, "ContentSourceTest"));
+            TestUtil.Rm(testDir);
+            testDir.Create();
+            assertTrue(Directory.Exists(testDir.FullName));
+        }
+
+        public override void TearDown()
+        {
+            TestUtil.Rm(testDir);
+            base.TearDown();
+        }
+    }
+}
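
The tests above pin down StreamUtils' extension-based dispatch: .gz/.gzip and
.bz2/.bzip (in any letter case) select a decompressing stream, and everything
else is read as plain text. A rough sketch of that selection, using the same
stream types this test already imports (illustrative only; the real logic
lives in StreamUtils.GetInputStream/GetOutputStream):

    using System.IO;
    using System.IO.Compression;
    using ICSharpCode.SharpZipLib.BZip2;

    public static class StreamDispatchSketch
    {
        public static Stream OpenByExtension(FileInfo f)
        {
            Stream raw = new FileStream(f.FullName, FileMode.Open, FileAccess.Read);
            switch (Path.GetExtension(f.Name).ToLowerInvariant())
            {
                case ".gz":
                case ".gzip":
                    return new GZipStream(raw, CompressionMode.Decompress);
                case ".bz2":
                case ".bzip":
                    return new BZip2InputStream(raw);
                default:
                    // ".txt", ".text", or anything unrecognized: plain text
                    return raw;
            }
        }
    }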

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b515271d/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/TestConfig.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/TestConfig.cs 
b/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/TestConfig.cs
new file mode 100644
index 0000000..75f16a9
--- /dev/null
+++ b/src/Lucene.Net.Tests.Benchmark/ByTask/Utils/TestConfig.cs
@@ -0,0 +1,37 @@
+using Lucene.Net.Util;
+using NUnit.Framework;
+using System.Collections.Generic;
+
+namespace Lucene.Net.Benchmarks.ByTask.Utils
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    public class TestConfig : LuceneTestCase
+    {
+        [Test]
+        public void TestAbsolutePathNamesWindows()
+        {
+            Dictionary<string, string> props = new Dictionary<string, 
string>();
+            props["work.dir1"] = "c:\\temp";
+            props["work.dir2"] = "c:/temp";
+            Config conf = new Config(props);
+            assertEquals("c:\\temp", conf.Get("work.dir1", ""));
+            assertEquals("c:/temp", conf.Get("work.dir2", ""));
+        }
+    }
+}
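
Config wraps a flat name/value property set; Get(name, default) returns the
stored string, or the supplied default when the key is absent. A minimal
usage sketch, assuming only the two members the test exercises (the
Dictionary-based constructor and Get):

    using System;
    using System.Collections.Generic;
    using Lucene.Net.Benchmarks.ByTask.Utils;

    public static class ConfigSketch
    {
        public static void Main()
        {
            Dictionary<string, string> props = new Dictionary<string, string>();
            props["work.dir"] = "c:\\temp";
            props["directory"] = "RAMDirectory";

            Config conf = new Config(props);
            Console.WriteLine(conf.Get("work.dir", "."));      // c:\temp  (stored value)
            Console.WriteLine(conf.Get("merge.factor", "10")); // 10       (default; key absent)
        }
    }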

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b515271d/src/Lucene.Net.Tests.Benchmark/ByTask/conf.zip
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/conf.zip 
b/src/Lucene.Net.Tests.Benchmark/ByTask/conf.zip
new file mode 100644
index 0000000..9b5755e
Binary files /dev/null and b/src/Lucene.Net.Tests.Benchmark/ByTask/conf.zip 
differ

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b515271d/src/Lucene.Net.Tests.Benchmark/ByTask/reuters.first20.lines.txt
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/reuters.first20.lines.txt 
b/src/Lucene.Net.Tests.Benchmark/ByTask/reuters.first20.lines.txt
new file mode 100644
index 0000000..41b04b3
--- /dev/null
+++ b/src/Lucene.Net.Tests.Benchmark/ByTask/reuters.first20.lines.txt
@@ -0,0 +1,20 @@
+BAHIA COCOA REVIEW     19870226200101  Showers continued throughout the week 
in the Bahia cocoa zone, alleviating the drought since early January and 
improving prospects for the coming temporao, although normal humidity levels 
have not been restored, Comissaria Smith said in its weekly review.     The dry 
period means the temporao will be late this year.     Arrivals for the week 
ended February 22 were 155,221 bags of 60 kilos making a cumulative total for 
the season of 5.93 mln against 5.81 at the same stage last year. Again it seems 
that cocoa delivered earlier on consignment was included in the arrivals 
figures.     Comissaria Smith said there is still some doubt as to how much old 
crop cocoa is still available as harvesting has practically come to an end. 
With total Bahia crop estimates around 6.4 mln bags and sales standing at 
almost 6.2 mln there are a few hundred thousand bags still in the hands of 
farmers, middlemen, exporters and processors.     There are doubts as to how 
much of this cocoa would be fit for export as shippers are now experiencing 
dificulties in obtaining +Bahia superior+ certificates.     In view of the 
lower quality over recent weeks farmers have sold a good part of their cocoa 
held on consignment.     Comissaria Smith said spot bean prices rose to 340 to 
350 cruzados per arroba of 15 kilos.     Bean shippers were reluctant to offer 
nearby shipment and only limited sales were booked for March shipment at 1,750 
to 1,780 dlrs per tonne to ports to be named.     New crop sales were also 
light and all to open ports with June/July going at 1,850 and 1,880 dlrs and at 
35 and 45 dlrs under New York july, Aug/Sept at 1,870, 1,875 and 1,880 dlrs per 
tonne FOB.     Routine sales of butter were made. March/April sold at 4,340, 
4,345 and 4,350 dlrs.     April/May butter went at 2.27 times New York May, 
June/July at 4,400 and 4,415 dlrs, Aug/Sept at 4,351 to 4,450 dlrs and at 2.27 
and 2.28 times New York Sept and Oct/Dec at 4,480 dlrs and 2.27 times New 
York Dec, Comissaria Smith said.     Destinations were the U.S., Covertible 
currency areas, Uruguay and open ports.     Cake sales were registered at 785 
to 995 dlrs for March/April, 785 dlrs for May, 753 dlrs for Aug and 0.39 times 
New York Dec for Oct/Dec.     Buyers were the U.S., Argentina, Uruguay and 
convertible currency areas.     Liquor sales were limited with March/April 
selling at 2,325 and 2,380 dlrs, June/July at 2,375 dlrs and at 1.25 times New 
York July, Aug/Sept at 2,400 dlrs and at 1.25 times New York Sept and Oct/Dec 
at 1.25 times New York Dec, Comissaria Smith said.     Total Bahia sales are 
currently estimated at 6.13 mln bags against the 1986/87 crop and 1.06 mln bags 
against the 1987/88 crop.     Final figures for the period to February 28 are 
expected to be published by the Brazilian Cocoa Trade Commission after carnival 
which ends midday on February 27.  Reuter &#3;  
+STANDARD OIL <SRD> TO FORM FINANCIAL UNIT      19870226200220  Standard Oil Co 
and BP North America Inc said they plan to form a venture to manage the money 
market borrowing and investment activities of both companies.     BP North 
America is a subsidiary of British Petroleum Co Plc <BP>, which also owns a 55 
pct interest in Standard Oil.     The venture will be called BP/Standard 
Financial Trading and will be operated by Standard Oil under the oversight of a 
joint management committee.   Reuter &#3;  
+COBANCO INC <CBCO> YEAR NET    19870226201859  Shr 34 cts vs 1.19 dlrs     Net 
807,000 vs 2,858,000     Assets 510.2 mln vs 479.7 mln     Deposits 472.3 mln 
vs 440.3 mln     Loans 299.2 mln vs 327.2 mln     Note: 4th qtr not available. 
Year includes 1985 extraordinary gain from tax carry forward of 132,000 dlrs, 
or five cts per shr.  Reuter &#3;  
+WORLD MARKET PRICE FOR UPLAND COTTON - USDA    19870226213846  The U.S. 
Agriculture Department announced the prevailing world market price, adjusted to 
U.S. quality and location, for Strict Low Middling, 1-1/16 inch upland cotton 
at 52.69 cts per lb, to be in effect through midnight March 5.     The adjusted 
world price is at average U.S. producing locations (near Lubbock, Texas) and 
will be further adjusted for other qualities and locations. The price will be 
used in determining First Handler Cotton Certificate payment rates.     Based 
on data for the week ended February 26, the adjusted world price for upland 
cotton is determined as follows, in cts per lb --  Northern European Price      
         66.32       Adjustments --  Average U.S. spot mkt location 10.42   SLM 
1-1/16 inch cotton          1.80   Average U.S. location           0.53  Sum of 
adjustments              12.75  Adjusted world price            53.57  Reuter 
&#3;  
+SUGAR QUOTA IMPORTS DETAILED -- USDA   19870226213854  The U.S. Agriculture 
Department said cumulative sugar imports from individual countries during the 
1987 quota year, which began January 1, 1987 and ends December 31, 1987 were as 
follows, with quota allocations for the quota year in short tons, raw value --  
           CUMULATIVE     QUOTA 1987               IMPORTS     ALLOCATIONS  
ARGENTINA        nil          39,130  AUSTRALIA        nil          75,530  
BARBADOS         nil           7,500  BELIZE           nil          10,010  
BOLIVIA          nil           7,500  BRAZIL           nil         131,950  
CANADA           nil          18,876                            QUOTA 1987      
         IMPORTS     ALLOCATIONS  COLOMBIA         103          21,840  CONGO   
         nil           7,599  COSTA RICA       nil          17,583  IVORY COAST 
     nil           7,500  DOM REP        5,848         160,160  ECUADOR         
 nil          10,010  EL SALVADOR      nil          26,019.8  
 FIJI             nil          25,190  GABON            nil           7,500     
                       QUOTA 1987               IMPORTS     ALLOCATIONS  
GUATEMALA        nil          43,680  GUYANA           nil          10,920  
HAITI            nil           7,500  HONDURAS         nil          15,917.2  
INDIA            nil           7,500  JAMAICA          nil          10,010  
MADAGASCAR       nil           7,500  MALAWI           nil           9,,100     
                       QUOTA 1987                IMPORTS    ALLOCATIONS  
MAURITIUS         nil         10,920  MEXICO             37          7,500  
MOZAMBIQUE        nil         11,830  PANAMA            nil         26,390  
PAPUA NEW GUINEA  nil          7,500  PARAGUAY          nil          7,500  
PERU              nil         37,310  PHILIPPINES       nil        143,780  
ST.CHRISTOPHER-  NEVIS             nil          7,500                           
QUOTA 1987                 IMPORTS  ALLOCATIONS  SWAZILAND          nil       
   14,560  TAIWAN             nil         10,920  THAILAND           nil        
 12,740  TRINIDAD-TOBAGO    nil          7,500  URUGUAY            nil          
7,500  ZIMBABWE           nil         10,920   Reuter &#3;  
+GRAIN SHIPS LOADING AT PORTLAND        19870226213903  There were seven grain 
ships loading and six ships were waiting to load at Portland, according to the 
Portland Merchants Exchange.  Reuter &#3;  
+IRAN ANNOUNCES END OF MAJOR OFFENSIVE IN GULF WAR      19870226214000  Iran 
announced tonight that its major offensive against Iraq in the Gulf war had 
ended after dealing savage blows against the Baghdad government.     The 
Iranian news agency IRNA, in a report received in London, said the operation 
code-named Karbala-5 launched into Iraq on January 9 was now over.     It 
quoted a joint statewment by the Iranian Army and Revolutionary Guards Corps as 
saying that their forces had "dealt one of the severest blows on the Iraqi war 
machine in the history of the Iraq-imposed war."     The statement by the 
Iranian High Command appeared to herald the close of an assault on the port 
city of Basra in southern Iraq.     "The operation was launched at a time when 
the Baghdad government was spreading extensive propaganda on the resistance 
power of its army...," said the statement quoted by IRNA.     It claimed 
massive victories in the seven-week offensive and called on supporters of 
Baghdad to "come
  to their senses" and discontinue support for what it called the tottering 
regime in Iraq.     Iran said its forces had "liberated" 155 square kilometers 
of enemy-occupied territory during the 1987 offensive and taken over islands, 
townships, rivers and part of a road leading into Basra.     The Iranian forces 
"are in full control of these areas," the statement said.     It said 81 Iraqi 
brigades and battalions were totally destroyed, along with 700 tanks and 1,500 
other vehicles. The victory list also included 80 warplanes downed, 250 anti- 
aircraft guns and 400 pieces of military hardware destroyed and the seizure of 
220 tanks and armoured personnel carriers.  Reuter &#3;  
+MERIDIAN BANCORP INC <MRDN> SETS REGULAR PAYOUT        19870226214034  Qtly 
div 25 cts vs 25 cts prior     Pay April one     Record March 15  Reuter &#3;  
+U.S. BANK DISCOUNT BORROWINGS 310 MLN DLRS     19870226214134  U.S. bank 
discount window borrowings less extended credits averaged 310 mln dlrs in the 
week to Wednesday February 25, the Federal Reserve said.     The Fed said that 
overall borrowings in the week fell 131 mln dlrs to 614 mln dlrs, with extended 
credits up 10 mln dlrs at 304 mln dlrs. The week was the second half of a 
two-week statement period. Net borrowings in the prior week averaged 451 mln 
dlrs.     Commenting on the two-week statement period ended February 25, the 
Fed said that banks had average net free reserves of 644 mln dlrs a day, down 
from 1.34 billion two weeks earlier.     A Federal Reserve spokesman told a 
press briefing that there were no large single day net misses in the Fed's 
reserve projections in the week to Wednesday.     He said that natural float 
had been "acting a bit strangely" for this time of year, noting that there had 
been poor weather during the latest week.     The spokesman said that natural 
float ranged from under 500 mln dlrs on Friday, for which he could give no 
reason, to nearly one billion dlrs on both Thursday and Wednesday.     The Fed 
spokeman could give no reason for Thursday's high float, but he said that about 
750 mln dlrs of Wednesday's float figure was due to holdover and transportation 
float at two widely separated Fed districts.     For the week as a whole, he 
said that float related as of adjustments were "small," adding that they fell 
to a negative 750 mln dlrs on Tuesday due to a number of corrections for 
unrelated cash letter errors in six districts around the country.     The 
spokesman said that on both Tuesday and Wednesday, two different clearing banks 
had system problems and the securities and Federal funds wires had to be held 
open until about 2000 or 2100 EST on both days.     However, he said that both 
problems were cleared up during both afternoons and there was no evidence of 
any reserve impact.     During the week ended Wednesday, 45 pct of net 
discount window borrowings were made by the smallest banks, with 30 pct by 
the 14 large money center banks and 25 pct by large regional institutions.     
On Wednesday, 55 pct of the borrowing was accounted for by the money center 
banks, with 30 pct by the large regionals and 15 pct by the smallest banks.     
The Fed spokesman said the banking system had excess reserves on Thursday, 
Monday and Tuesday and a deficit on Friday and Wedndsday. That produced a small 
daily average deficit for the week as a whole.     For the two-week period, he 
said there were relatively high excess reserves on a daily avearge, almost all 
of which were at the smallest banks.  Reuter &#3;  
+AMERICAN EXPRESS <AXP> SEEN IN POSSIBLE SPINNOFF       19870226214313  
American Express Co remained silent on market rumors it would spinoff all or 
part of its Shearson Lehman Brothers Inc, but some analysts said the company 
may be considering such a move because it is unhappy with the market value of 
its stock.     American Express stock got a lift from the rumor, as the market 
calculated a partially public Shearson may command a good market value, thereby 
boosting the total value of American Express. The rumor also was accompanied by 
talk the financial services firm would split its stock and boost its dividend.  
   American Express closed on the New York Stock Exchange at 72-5/8, up 4-1/8 
on heavy volume.     American Express would not comment on the rumors or its 
stock activity.     Analysts said comments by the company at an analysts' 
meeting Tuesday helped fuel the rumors as did an announcement yesterday of 
management changes.     At the meeting, company officials said American 
Express stock is undervalued and does not fully reflect the performance of Shearson, 
according to analysts.     Yesterday, Shearson said it was elevating its chief 
operating officer, Jeffery Lane, to the added position of president, which had 
been vacant. It also created four new positions for chairmen of its operating 
divisions.     Analysts speculated a partial spinoff would make most sense, 
contrary to one variation on market rumors of a total spinoff.     Some 
analysts, however, disagreed that any spinoff of Shearson would be good since 
it is a strong profit center for American Express, contributing about 20 pct of 
earnings last year.     "I think it is highly unlikely that American Express is 
going to sell shearson," said Perrin Long of Lipper Analytical. He questioned 
what would be a better investment than "a very profitable securities firm."     
Several analysts said American Express is not in need of cash, which might be 
the only reason to sell a part of a strong asset.     But others 
believe the company could very well of considered the option of spinning 
out part of Shearson, and one rumor suggests selling about 20 pct of it in the 
market.     Larry Eckenfelder of Prudential-Bache Securities said he believes 
American Express could have considered a partial spinoff in the past.     
"Shearson being as profitable as it is would have fetched a big premium in the 
market place. Shearson's book value is in the 1.4 mln dlr range. Shearson in 
the market place would probably be worth three to 3.5 bilion dlrs in terms of 
market capitalization," said Eckenfelder.     Some analysts said American 
Express could use capital since it plans to expand globally.     "They have 
enormous internal growth plans that takes capital. You want your stock to 
reflect realistic valuations to enhance your ability to make all kinds of 
endeavors down the road," said E.F. Hutton Group analyst Michael Lewis.     
"They've outlined the fact that they're investing heavily in the future, which g
 oes heavily into the international arena," said Lewis. "...That does not 
preclude acquisitions and divestitures along the way," he said.     Lewis said 
if American Express reduced its exposure to the brokerage business by selling 
part of shearson, its stock might better reflect other assets, such as the 
travel related services business.     "It could find its true water mark with a 
lesser exposure to brokerage. The value of the other components could command a 
higher multiple because they constitute a higher percentage of the total 
operating earnings of the company," he said.      Lewis said Shearson 
contributed 316 mln in after-tax operating earnings, up from about 200 mln dlrs 
in 1985.      Reuter &#3;  
+OHIO MATTRESS <OMT> MAY HAVE LOWER 1ST QTR NET 19870226201915  Ohio Mattress 
Co said its first quarter, ending February 28, profits may be below the 2.4 mln 
dlrs, or 15 cts a share, earned in the first quarter of fiscal 1986.     The 
company said any decline would be due to expenses related to the acquisitions 
in the middle of the current quarter of seven licensees of Sealy Inc, as well 
as 82 pct of the outstanding capital stock of Sealy.     Because of these 
acquisitions, it said, first quarter sales will be substantially higher than 
last year's 67.1 mln dlrs.     Noting that it typically reports first quarter 
results in late march, said the report is likely to be issued in early April 
this year.     It said the delay is due to administrative considerations, 
including conducting appraisals, in connection with the acquisitions.  Reuter 
&#3;  
+U.S. M-1 MONEY SUPPLY ROSE 2.1 BILLION DLRS    19870226214435  U.S. M-1 money 
supply rose 2.1 billion dlrs to a seasonally adjusted 736.7 billion dlrs in the 
February 16 week, the Federal Reserve said.     The previous week's M-1 level 
was revised to 734.6 billion dlrs from 734.2 billion dlrs, while the four-week 
moving average of M-1 rose to 735.0 billion dlrs from 733.5 billion.     
Economists polled by Reuters said that M-1 should be anywhere from down four 
billion dlrs to up 2.3 billion dlrs. The average forecast called for a 300 mln 
dlr M-1 rise.  Reuter &#3;  
+GENERAL BINDING <GBND> IN MARKETING AGREEMENT  19870226214508  General Binding 
Corp said it reached a marketing agreement with Varitronic Systems Inc, a 
manufacturer and marketer of electronic lettering systems.     Under terms of 
the agreement, General Binding will carry Varitronics' Merlin Express 
Presentation Lettering System, a portable, battery-operated lettering system 
which produces type on adhesive-backed tape.  Reuter &#3;  
+LIBERTY ALL-STAR <USA> SETS INITIAL PAYOUT     19870226214544  Liberty 
All-Star Equity Fund said it declared an initial dividend of five cts per 
share, payable April two to shareholders of record March 20.     It said the 
dividend includes a quarterly dividend of three cts a share and a special 
payout of two cts a share, which covers the period from November three, 1986, 
when the fund began operations, to December 31, 1986.     The fund said its 
quarterly dividend rate may fluctuate in the future.  Reuter &#3;  
+COCA COLA <KO> UNIT AND WORLD FILM IN VENTURE  19870226214745  Coca-Cola Co's 
Entertainment Business Sector Inc unit said it formed a joint venture with an 
affiliate of World Film Services to acquire, produce and distribute television 
programming around the world.     World Film Services was formed by chairman 
John Heyman in 1963 to produce films.      Reuter &#3;  
+FORD MOTOR CREDIT <F> TO REDEEM DEBENTURES     19870226214753  Ford Motor Co 
said its Ford Motor Credit Co on April One will redeem 4.0 mln dlrs of its 8.70 
pct debentures due April 1, 1999.     It said the debentures are redeemable at 
a price of 100 pct of the principal. Because April 1, 1987 is an interest 
payment date on the debentures, no accrued interest will be payable on the 
redemption date as part of the redemption proceeds.     Debentures will be 
selected for redemption on a pro rata basis, Ford said.   Reuter &#3;  
+STERLING SOFTWARE <SSW> NOTE HOLDERS OK BUY    19870226214802  Sterling 
Software Inc said it received consent of a majority of the holders of its eight 
pct convertible sernior subordinated debentures required to purchase shares of 
its common.     The company said it may now buy its stock at its discretion 
depending on market conditions.  Reuter &#3;  
+<SCHULT HOMES CORP> MAKES INITIAL STOCK OFFER  19870226214818  Schult Homes 
Corp announced an initial public offering of 833,334 units at five dlrs per 
unit, said Janney Montgomery Scott Inc and Woolcott and Co, managing 
underwriters of the offering.     They said each unit consists of one common 
share and one warrant to buy one-half share of common.     The warrant will 
entitle holders to buy one-half common share at 5.50 dlrs per full share from 
March one, 1988, to September one, 1989, and thereafter at 6.50 dlrs per full 
share until March 1991, they said.  Reuter &#3;  
+FLUOR <FLR> UNIT GETS CONSTRUCTION CONTRACT    19870226214826  Fluor Corp said 
its Fluor Daniel unit received a contract from Union Carbide Corp <UK> covering 
design, procurement and construction of a 108 megawatt combined cycle 
cogeneration facility in Seadrift, Texas.     The value of the contract was not 
disclosed.  Reuter &#3;  
+SUFFIELD FINANCIAL CORP <SFCP> SELLS STOCK     19870226214835  Suffield 
Financial Corp said   Jon Googel and Benjamin Sisti of Colonial Realty, West 
Hartford, Conn., purchased 175,900 shares of its stock for 3,416,624.     The 
company said the purchase equals 5.2 pct of its outstanding shares.  Reuter 
&#3;  
